commit d5f640174e838469709e40e22947af6f85b29766 Author: Daniel Dai Date: Tue Apr 10 14:37:07 2018 -0700 HIVE-19161: Add authorizations to information schema diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 0627c35..92dbf4a 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -2911,6 +2911,11 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR_INTERVAL("hive.server2.clear.dangling.scratchdir.interval", "1800s", new TimeValidator(TimeUnit.SECONDS), "Interval to clear dangling scratch dir periodically in HS2"), + HIVE_SERVER2_PRIVILEGE_SYNCHRONIZER("hive.server2.privilege.synchronizer", true, + "Whether to synchronize privileges from an external authorizer such as Ranger to Hive periodically in HS2"), + HIVE_SERVER2_PRIVILEGE_SYNCHRONIZER_INTERVAL("hive.server2.privilege.synchronizer.interval", + "1800s", new TimeValidator(TimeUnit.SECONDS), + "Interval at which to synchronize privileges from the external authorizer in HS2"), HIVE_SERVER2_SLEEP_INTERVAL_BETWEEN_START_ATTEMPTS("hive.server2.sleep.interval.between.start.attempts", "60s", new TimeValidator(TimeUnit.MILLISECONDS, 0l, true, Long.MAX_VALUE, true), "Amount of time to sleep between HiveServer2 start attempts. Primarily meant for tests"), diff --git a/itests/hive-unit/pom.xml b/itests/hive-unit/pom.xml index f473d25..cf91da7 100644 --- a/itests/hive-unit/pom.xml +++ b/itests/hive-unit/pom.xml @@ -46,6 +46,11 @@ <groupId>org.apache.hive</groupId> + <artifactId>hive-jdbc-handler</artifactId> + <version>${project.version}</version> + </dependency> + <dependency> + <groupId>org.apache.hive</groupId> <artifactId>hive-service</artifactId> <version>${project.version}</version> diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestInformationSchemaWithPrivilege.java b/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestInformationSchemaWithPrivilege.java new file mode 100644 index 0000000..1f8df89 --- /dev/null +++ b/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestInformationSchemaWithPrivilege.java @@ -0,0 +1,541 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hive.service.server; + +import java.io.File; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator; +import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessController; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizationValidator; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizer; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizerFactory; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizerImpl; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveMetastoreClientFactory; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePolicyChangeListener; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePolicyProvider; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveResourceACLs; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType; +import org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAccessControllerWrapper; +import org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizationValidator; +import org.apache.hive.beeline.BeeLine; +import org.apache.hive.jdbc.miniHS2.MiniHS2; +import org.apache.hive.service.cli.CLIServiceClient; +import org.apache.hive.service.cli.OperationHandle; +import org.apache.hive.service.cli.RowSet; +import org.apache.hive.service.cli.SessionHandle; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +public class TestInformationSchemaWithPrivilege { + + // Group mapping: + // group_a: user1, user2 + // group_b: user2 + static class FakeGroupAuthenticator extends HadoopDefaultAuthenticator { + @Override + public List<String> getGroupNames() { + List<String> groups = new ArrayList<String>(); + if (getUserName().equals("user1")) { + groups.add("group_a"); + } else if (getUserName().equals("user2")) { + groups.add("group_a"); + groups.add("group_b"); + } + return groups; + } + } + + // Privilege matrix (S = SELECT, U = UPDATE): + // testdb1: user1=S, group_a=S + // testtable1.*: user1=SU, user2=S + // testtable2.*: group_a=S + // testtable3.*: public=S + // testtable4.*: group_b=S + // testdb2: user1=S + // testtable1.key: user1=S + static class TestHivePolicyProvider implements HivePolicyProvider { + @Override + public HiveResourceACLs getResourceACLs(HivePrivilegeObject hiveObject) { + HiveResourceACLs acls = new HiveResourceACLs(); + if (hiveObject.getType() == HivePrivilegeObjectType.DATABASE) { + if (hiveObject.getDbname().equals("testdb1")) { + acls.addUserEntry("user1", HiveResourceACLs.Privilege.SELECT, HiveResourceACLs.AccessResult.ALLOWED); + acls.addGroupEntry("group_a", HiveResourceACLs.Privilege.SELECT, HiveResourceACLs.AccessResult.ALLOWED); + } else if
(hiveObject.getDbname().equals("testdb2")) { + acls.addUserEntry("user1", HiveResourceACLs.Privilege.SELECT, HiveResourceACLs.AccessResult.ALLOWED); + } + } else if (hiveObject.getType() == HivePrivilegeObjectType.TABLE_OR_VIEW) { + if (hiveObject.getDbname().equals("testdb1") &&hiveObject.getObjectName().equals("testtable1")) { + acls.addUserEntry("user1", HiveResourceACLs.Privilege.SELECT, HiveResourceACLs.AccessResult.ALLOWED); + acls.addUserEntry("user1", HiveResourceACLs.Privilege.UPDATE, HiveResourceACLs.AccessResult.ALLOWED); + acls.addUserEntry("user2", HiveResourceACLs.Privilege.SELECT, HiveResourceACLs.AccessResult.ALLOWED); + } else if (hiveObject.getDbname().equals("testdb1") && hiveObject.getObjectName().equals("testtable2")) { + acls.addGroupEntry("group_a", HiveResourceACLs.Privilege.SELECT, HiveResourceACLs.AccessResult.ALLOWED); + } else if (hiveObject.getDbname().equals("testdb1") && hiveObject.getObjectName().equals("testtable3")) { + acls.addGroupEntry("public", HiveResourceACLs.Privilege.SELECT, HiveResourceACLs.AccessResult.ALLOWED); + } else if (hiveObject.getDbname().equals("testdb1") && hiveObject.getObjectName().equals("testtable4")) { + acls.addGroupEntry("group_b", HiveResourceACLs.Privilege.SELECT, HiveResourceACLs.AccessResult.ALLOWED); + } else if (hiveObject.getDbname().equals("testdb2") && hiveObject.getObjectName().equals("testtable1")) { + acls.addUserEntry("user1", HiveResourceACLs.Privilege.SELECT, HiveResourceACLs.AccessResult.ALLOWED); + } + } else if (hiveObject.getType() == HivePrivilegeObjectType.COLUMN) { + if (hiveObject.getDbname().equals("testdb1") &&hiveObject.getObjectName().equals("testtable1")) { + acls.addUserEntry("user1", HiveResourceACLs.Privilege.SELECT, HiveResourceACLs.AccessResult.ALLOWED); + acls.addUserEntry("user2", HiveResourceACLs.Privilege.SELECT, HiveResourceACLs.AccessResult.ALLOWED); + } else if (hiveObject.getDbname().equals("testdb1") && hiveObject.getObjectName().equals("testtable2")) { + acls.addGroupEntry("group_a", HiveResourceACLs.Privilege.SELECT, HiveResourceACLs.AccessResult.ALLOWED); + } else if (hiveObject.getDbname().equals("testdb1") && hiveObject.getObjectName().equals("testtable3")) { + acls.addGroupEntry("public", HiveResourceACLs.Privilege.SELECT, HiveResourceACLs.AccessResult.ALLOWED); + } else if (hiveObject.getDbname().equals("testdb1") && hiveObject.getObjectName().equals("testtable4")) { + acls.addGroupEntry("group_b", HiveResourceACLs.Privilege.SELECT, HiveResourceACLs.AccessResult.ALLOWED); + } else if (hiveObject.getDbname().equals("testdb2") && hiveObject.getObjectName().equals("testtable1") + && hiveObject.getColumns().get(0).equals("key")) { + acls.addUserEntry("user1", HiveResourceACLs.Privilege.SELECT, HiveResourceACLs.AccessResult.ALLOWED); + } + } + return acls; + } + + @Override + public void registerHivePolicyChangeListener(HivePolicyChangeListener listener) { + // PolicyChangeListener will be implemented later + } + } + + static class HiveAuthorizerImplWithPolicyProvider extends HiveAuthorizerImpl { + + public HiveAuthorizerImplWithPolicyProvider(HiveAccessController accessController, HiveAuthorizationValidator authValidator) { + super(accessController, authValidator); + } + + @Override + public HivePolicyProvider getHivePolicyProvider() throws HiveAuthzPluginException { + return new TestHivePolicyProvider(); + } + } + + static class TestHiveAuthorizerFactory implements HiveAuthorizerFactory { + @Override + public HiveAuthorizer createHiveAuthorizer(HiveMetastoreClientFactory 
metastoreClientFactory, + HiveConf conf, HiveAuthenticationProvider authenticator, HiveAuthzSessionContext ctx) throws HiveAuthzPluginException { + SQLStdHiveAccessControllerWrapper privilegeManager = + new SQLStdHiveAccessControllerWrapper(metastoreClientFactory, conf, authenticator, ctx); + return new HiveAuthorizerImplWithPolicyProvider( + privilegeManager, + new SQLStdHiveAuthorizationValidator(metastoreClientFactory, conf, authenticator, + privilegeManager, ctx) + ); + } + } + + private static MiniHS2 miniHS2 = null; + private static MiniZooKeeperCluster zkCluster = null; + private static Map<String, String> confOverlay; + + @BeforeClass + public static void beforeTest() throws Exception { + File zkDataDir = new File(System.getProperty("test.tmp.dir")); + zkCluster = new MiniZooKeeperCluster(); + int zkPort = zkCluster.startup(zkDataDir); + + miniHS2 = new MiniHS2(new HiveConf()); + confOverlay = new HashMap<String, String>(); + confOverlay.put(ConfVars.HIVE_SERVER2_PRIVILEGE_SYNCHRONIZER.varname, "true"); + confOverlay.put(ConfVars.HIVE_SERVER2_PRIVILEGE_SYNCHRONIZER_INTERVAL.varname, "1"); + confOverlay.put(ConfVars.HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY.varname, "true"); + confOverlay.put(ConfVars.HIVE_AUTHORIZATION_MANAGER.varname, TestHiveAuthorizerFactory.class.getName()); + confOverlay.put(ConfVars.HIVE_ZOOKEEPER_QUORUM.varname, "localhost"); + confOverlay.put(ConfVars.HIVE_ZOOKEEPER_CLIENT_PORT.varname, Integer.toString(zkPort)); + confOverlay.put(MetastoreConf.ConfVars.AUTO_CREATE_ALL.getVarname(), "true"); + confOverlay.put(ConfVars.HIVE_AUTHENTICATOR_MANAGER.varname, FakeGroupAuthenticator.class.getName()); + miniHS2.start(confOverlay); + } + + @Test + public void test() throws Exception { + + String db1Name = "testdb1"; + String db2Name = "testdb2"; + String table1Name = "testtable1"; + String table2Name = "testtable2"; + String table3Name = "testtable3"; + String table4Name = "testtable4"; + CLIServiceClient serviceClient = miniHS2.getServiceClient(); + SessionHandle sessHandle = serviceClient.openSession("hive_test_user", ""); + serviceClient.executeStatement(sessHandle, "DROP DATABASE IF EXISTS " + db1Name + " CASCADE", confOverlay); + serviceClient.executeStatement(sessHandle, "CREATE DATABASE " + db1Name, confOverlay); + serviceClient.executeStatement(sessHandle, "DROP TABLE IF EXISTS " + db1Name + "." + table1Name, confOverlay); + serviceClient.executeStatement(sessHandle, "CREATE TABLE " + db1Name + "." + table1Name + "(key string, value double)", confOverlay); + serviceClient.executeStatement(sessHandle, "DROP TABLE IF EXISTS " + db1Name + "." + table2Name, confOverlay); + serviceClient.executeStatement(sessHandle, "CREATE TABLE " + db1Name + "." + table2Name + "(key string, value double)", confOverlay); + serviceClient.executeStatement(sessHandle, "DROP VIEW IF EXISTS " + db1Name + "." + table3Name, confOverlay); + serviceClient.executeStatement(sessHandle, "CREATE VIEW " + db1Name + "." + table3Name + " AS SELECT * FROM " + db1Name + "." + table1Name, confOverlay); + serviceClient.executeStatement(sessHandle, "DROP TABLE IF EXISTS " + db1Name + "." + table4Name, confOverlay); + serviceClient.executeStatement(sessHandle, "CREATE TABLE " + db1Name + "."
+ table4Name + "(key string, value double) PARTITIONED BY (p string)", confOverlay); + + serviceClient.executeStatement(sessHandle, "DROP DATABASE IF EXISTS " + db2Name + " CASCADE", confOverlay); + serviceClient.executeStatement(sessHandle, "CREATE DATABASE " + db2Name, confOverlay); + serviceClient.executeStatement(sessHandle, "DROP TABLE IF EXISTS " + db2Name + "." + table1Name, confOverlay); + serviceClient.executeStatement(sessHandle, "CREATE TABLE " + db2Name + "." + table1Name + "(key string, value double)", confOverlay); + + // Just to trigger auto creation of needed metastore tables + serviceClient.executeStatement(sessHandle, "SHOW GRANT USER hive_test_user ON ALL", confOverlay); + serviceClient.closeSession(sessHandle); + + List baseArgs = new ArrayList(); + baseArgs.add("-d"); + baseArgs.add(BeeLine.BEELINE_DEFAULT_JDBC_DRIVER); + baseArgs.add("-u"); + baseArgs.add(miniHS2.getBaseJdbcURL()); + baseArgs.add("-n"); + baseArgs.add("hive_test_user"); + + List args = new ArrayList(baseArgs); + args.add("-f"); + args.add("../../metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql"); + BeeLine beeLine = new BeeLine(); + int result = beeLine.begin(args.toArray(new String[]{}), null); + beeLine.close(); + Assert.assertEquals(result, 0); + + boolean containsDb1 = false; + boolean containsDb2 = false; + boolean containsDb1Table1 = false; + boolean containsDb1Table2 = false; + boolean containsDb1Table3 = false; + boolean containsDb1Table4 = false; + boolean containsDb2Table1 = false; + boolean containsDb1Table1SelectPriv = false; + boolean containsDb1Table1UpdatePriv = false; + boolean containsDb1Table2SelectPriv = false; + boolean containsDb1Table3SelectPriv = false; + boolean containsDb1Table4SelectPriv = false; + boolean containsDb2Table1SelectPriv = false; + boolean containsDb1Table1Key = false; + boolean containsDb1Table1Value = false; + boolean containsDb1Table2Key = false; + boolean containsDb1Table2Value = false; + boolean containsDb1Table3Key = false; + boolean containsDb1Table3Value = false; + boolean containsDb1Table4Key = false; + boolean containsDb1Table4Value = false; + boolean containsDb1Table4P = false; + boolean containsDb2Table1Key = false; + + // We shall have enough time to synchronize privileges during loading information schema + miniHS2.getHiveConf().set(ConfVars.HIVE_SERVER2_PRIVILEGE_SYNCHRONIZER.varname, "false"); + + // User1 privileges: + // testdb1: S + // testtable1.*: SU + // testtable2.*: S + // testtable3.*: S + // testtable4.*: + // testdb2: S + // testtable1.*: S + sessHandle = serviceClient.openSession("user1", ""); + OperationHandle opHandle = serviceClient.executeStatement(sessHandle, "select * from INFORMATION_SCHEMA.SCHEMATA", confOverlay); + RowSet rowSet = serviceClient.fetchResults(opHandle); + Assert.assertEquals(rowSet.numRows(), 2); + Iterator iter = rowSet.iterator(); + while (iter.hasNext()) { + Object[] cols = iter.next(); + if (cols[1].equals(db1Name)) { + containsDb1 = true; + } else if (cols[1].equals(db2Name)) { + containsDb2 = true; + } + } + Assert.assertTrue(containsDb1 && containsDb2); + + opHandle = serviceClient.executeStatement(sessHandle, "select * from INFORMATION_SCHEMA.TABLES", confOverlay); + rowSet = serviceClient.fetchResults(opHandle); + Assert.assertEquals(rowSet.numRows(), 4); + iter = rowSet.iterator(); + while (iter.hasNext()) { + Object[] cols = iter.next(); + if (cols[1].equals(db1Name) && cols[2].equals(table1Name)) { + containsDb1Table1 = true; + } else if (cols[1].equals(db1Name) && 
cols[2].equals(table2Name)) { + containsDb1Table2 = true; + } else if (cols[1].equals(db1Name) && cols[2].equals(table3Name)) { + containsDb1Table3 = true; + } else if (cols[1].equals(db1Name) && cols[2].equals(table4Name)) { + containsDb1Table4 = true; + } else if (cols[1].equals(db2Name) && cols[2].equals(table1Name)) { + containsDb2Table1 = true; + } + } + Assert.assertTrue(containsDb1Table1 && containsDb1Table2 && containsDb1Table3 && !containsDb1Table4 + && containsDb2Table1); + + opHandle = serviceClient.executeStatement(sessHandle, "select * from INFORMATION_SCHEMA.VIEWS", confOverlay); + rowSet = serviceClient.fetchResults(opHandle); + Assert.assertEquals(rowSet.numRows(), 1); + iter = rowSet.iterator(); + while (iter.hasNext()) { + Object[] cols = iter.next(); + if (cols[1].equals(db1Name) && cols[2].equals(table3Name)) { + containsDb1Table3 = true; + } else { + containsDb1Table3 = false; + } + } + Assert.assertTrue(containsDb1Table3); + + opHandle = serviceClient.executeStatement(sessHandle, "select * from INFORMATION_SCHEMA.TABLE_PRIVILEGES", confOverlay); + rowSet = serviceClient.fetchResults(opHandle); + Assert.assertEquals(rowSet.numRows(), 5); + iter = rowSet.iterator(); + while (iter.hasNext()) { + Object[] cols = iter.next(); + if (cols[3].equals(db1Name) && cols[4].equals(table1Name) && cols[5].equals("SELECT")) { + containsDb1Table1SelectPriv = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table1Name) && cols[5].equals("UPDATE")) { + containsDb1Table1UpdatePriv = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table2Name) && cols[5].equals("SELECT")) { + containsDb1Table2SelectPriv = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table3Name) && cols[5].equals("SELECT")) { + containsDb1Table3SelectPriv = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table4Name) && cols[5].equals("SELECT")) { + containsDb1Table4SelectPriv = true; + } else if (cols[3].equals(db2Name) && cols[4].equals(table1Name) && cols[5].equals("SELECT")) { + containsDb2Table1SelectPriv = true; + } + } + Assert.assertTrue(containsDb1Table1SelectPriv && containsDb1Table1UpdatePriv + && containsDb1Table2SelectPriv && containsDb1Table3SelectPriv + && !containsDb1Table4SelectPriv && containsDb2Table1SelectPriv); + + opHandle = serviceClient.executeStatement(sessHandle, "select * from INFORMATION_SCHEMA.COLUMNS", confOverlay); + rowSet = serviceClient.fetchResults(opHandle); + Assert.assertEquals(rowSet.numRows(), 7); + iter = rowSet.iterator(); + while (iter.hasNext()) { + Object[] cols = iter.next(); + if (cols[1].equals(db1Name) && cols[2].equals(table1Name) && cols[3].equals("key")) { + containsDb1Table1Key = true; + } else if (cols[1].equals(db1Name) && cols[2].equals(table1Name) && cols[3].equals("value")) { + containsDb1Table1Value = true; + } else if (cols[1].equals(db1Name) && cols[2].equals(table2Name) && cols[3].equals("key")) { + containsDb1Table2Key = true; + } else if (cols[1].equals(db1Name) && cols[2].equals(table2Name) && cols[3].equals("value")) { + containsDb1Table2Value = true; + } else if (cols[1].equals(db1Name) && cols[2].equals(table3Name) && cols[3].equals("key")) { + containsDb1Table3Key = true; + } else if (cols[1].equals(db1Name) && cols[2].equals(table3Name) && cols[3].equals("value")) { + containsDb1Table3Value = true; + } else if (cols[1].equals(db2Name) && cols[2].equals(table1Name) && cols[3].equals("key")) { + containsDb2Table1Key = true; + } + } + Assert.assertTrue(containsDb1Table1Key && containsDb1Table1Value &&
containsDb1Table2Key + && containsDb1Table2Value && containsDb1Table3Key && containsDb1Table3Value && containsDb2Table1Key); + + containsDb1Table1Key = false; + containsDb1Table1Value = false; + containsDb1Table2Key = false; + containsDb1Table2Value = false; + containsDb1Table3Key = false; + containsDb1Table3Value = false; + containsDb2Table1Key = false; + opHandle = serviceClient.executeStatement(sessHandle, "select * from INFORMATION_SCHEMA.COLUMN_PRIVILEGES", confOverlay); + rowSet = serviceClient.fetchResults(opHandle); + Assert.assertEquals(rowSet.numRows(), 7); + iter = rowSet.iterator(); + while (iter.hasNext()) { + Object[] cols = iter.next(); + if (cols[3].equals(db1Name) && cols[4].equals(table1Name) && cols[5].equals("key")) { + containsDb1Table1Key = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table1Name) && cols[5].equals("value")) { + containsDb1Table1Value = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table2Name) && cols[5].equals("key")) { + containsDb1Table2Key = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table2Name) && cols[5].equals("value")) { + containsDb1Table2Value = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table3Name) && cols[5].equals("key")) { + containsDb1Table3Key = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table3Name) && cols[5].equals("value")) { + containsDb1Table3Value = true; + } else if (cols[3].equals(db2Name) && cols[4].equals(table1Name) && cols[5].equals("key")) { + containsDb2Table1Key = true; + } + } + Assert.assertTrue(containsDb1Table1Key && containsDb1Table1Value && containsDb1Table2Key + && containsDb1Table2Value && containsDb1Table3Key && containsDb1Table3Value && containsDb2Table1Key); + serviceClient.closeSession(sessHandle); + + // User2 privileges: + // testdb1: S + // testtable1.*: S + // testtable2.*: S + // testtable3.*: S + // testtable4.*: S + // testdb2: + // testtable1.*: + sessHandle = serviceClient.openSession("user2", ""); + opHandle = serviceClient.executeStatement(sessHandle, "select * from INFORMATION_SCHEMA.SCHEMATA", confOverlay); + rowSet = serviceClient.fetchResults(opHandle); + Assert.assertEquals(rowSet.numRows(), 1); + iter = rowSet.iterator(); + while (iter.hasNext()) { + Object[] cols = iter.next(); + if (cols[1].equals(db1Name)) { + containsDb1 = true; + } + } + Assert.assertTrue(containsDb1); + + opHandle = serviceClient.executeStatement(sessHandle, "select * from INFORMATION_SCHEMA.TABLES", confOverlay); + rowSet = serviceClient.fetchResults(opHandle); + Assert.assertEquals(rowSet.numRows(), 4); + iter = rowSet.iterator(); + while (iter.hasNext()) { + Object[] cols = iter.next(); + if (cols[1].equals(db1Name) && cols[2].equals(table1Name)) { + containsDb1Table1 = true; + } else if (cols[1].equals(db1Name) && cols[2].equals(table2Name)) { + containsDb1Table2 = true; + } else if (cols[1].equals(db1Name) && cols[2].equals(table3Name)) { + containsDb1Table3 = true; + } else if (cols[1].equals(db1Name) && cols[2].equals(table4Name)) { + containsDb1Table4 = true; + } + } + Assert.assertTrue(containsDb1Table1 && containsDb1Table2 && containsDb1Table3 && containsDb1Table4); + + opHandle = serviceClient.executeStatement(sessHandle, "select * from INFORMATION_SCHEMA.VIEWS", confOverlay); + rowSet = serviceClient.fetchResults(opHandle); + Assert.assertEquals(rowSet.numRows(), 1); + iter = rowSet.iterator(); + while (iter.hasNext()) { + Object[] cols = iter.next(); + if (cols[1].equals(db1Name) && cols[2].equals(table3Name)) { + 
containsDb1Table3 = true; + } else { + containsDb1Table3 = false; + } + } + Assert.assertTrue(containsDb1Table3); + + opHandle = serviceClient.executeStatement(sessHandle, "select * from INFORMATION_SCHEMA.TABLE_PRIVILEGES", confOverlay); + rowSet = serviceClient.fetchResults(opHandle); + Assert.assertEquals(rowSet.numRows(), 4); + iter = rowSet.iterator(); + while (iter.hasNext()) { + Object[] cols = iter.next(); + if (cols[3].equals(db1Name) && cols[4].equals(table1Name) && cols[5].equals("SELECT")) { + containsDb1Table1SelectPriv = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table2Name) && cols[5].equals("SELECT")) { + containsDb1Table2SelectPriv = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table3Name) && cols[5].equals("SELECT")) { + containsDb1Table3SelectPriv = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table4Name) && cols[5].equals("SELECT")) { + containsDb1Table4SelectPriv = true; + } + } + Assert.assertTrue(containsDb1Table1SelectPriv && containsDb1Table2SelectPriv + && containsDb1Table3SelectPriv && containsDb1Table4SelectPriv); + + // db1.testtable4.p should also be in COLUMNS; to be fixed in a separate ticket + opHandle = serviceClient.executeStatement(sessHandle, "select * from INFORMATION_SCHEMA.COLUMNS", confOverlay); + rowSet = serviceClient.fetchResults(opHandle); + Assert.assertEquals(rowSet.numRows(), 8); + iter = rowSet.iterator(); + while (iter.hasNext()) { + Object[] cols = iter.next(); + if (cols[1].equals(db1Name) && cols[2].equals(table1Name) && cols[3].equals("key")) { + containsDb1Table1Key = true; + } else if (cols[1].equals(db1Name) && cols[2].equals(table1Name) && cols[3].equals("value")) { + containsDb1Table1Value = true; + } else if (cols[1].equals(db1Name) && cols[2].equals(table2Name) && cols[3].equals("key")) { + containsDb1Table2Key = true; + } else if (cols[1].equals(db1Name) && cols[2].equals(table2Name) && cols[3].equals("value")) { + containsDb1Table2Value = true; + } else if (cols[1].equals(db1Name) && cols[2].equals(table3Name) && cols[3].equals("key")) { + containsDb1Table3Key = true; + } else if (cols[1].equals(db1Name) && cols[2].equals(table3Name) && cols[3].equals("value")) { + containsDb1Table3Value = true; + } else if (cols[1].equals(db1Name) && cols[2].equals(table4Name) && cols[3].equals("key")) { + containsDb1Table4Key = true; + } else if (cols[1].equals(db1Name) && cols[2].equals(table4Name) && cols[3].equals("value")) { + containsDb1Table4Value = true; + } + } + Assert.assertTrue(containsDb1Table1Key && containsDb1Table1Value && containsDb1Table2Key + && containsDb1Table2Value && containsDb1Table3Key && containsDb1Table3Value + && containsDb1Table4Key && containsDb1Table4Value); + + containsDb1Table1Key = false; + containsDb1Table1Value = false; + containsDb1Table2Key = false; + containsDb1Table2Value = false; + containsDb1Table3Key = false; + containsDb1Table3Value = false; + containsDb1Table4Key = false; + containsDb1Table4Value = false; + containsDb1Table4P = false; + opHandle = serviceClient.executeStatement(sessHandle, "select * from INFORMATION_SCHEMA.COLUMN_PRIVILEGES", confOverlay); + rowSet = serviceClient.fetchResults(opHandle); + Assert.assertEquals(rowSet.numRows(), 9); + iter = rowSet.iterator(); + while (iter.hasNext()) { + Object[] cols = iter.next(); + if (cols[3].equals(db1Name) && cols[4].equals(table1Name) && cols[5].equals("key")) { + containsDb1Table1Key = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table1Name) && cols[5].equals("value")) { +
containsDb1Table1Value = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table2Name) && cols[5].equals("key")) { + containsDb1Table2Key = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table2Name) && cols[5].equals("value")) { + containsDb1Table2Value = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table3Name) && cols[5].equals("key")) { + containsDb1Table3Key = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table3Name) && cols[5].equals("value")) { + containsDb1Table3Value = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table4Name) && cols[5].equals("key")) { + containsDb1Table4Key = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table4Name) && cols[5].equals("value")) { + containsDb1Table4Value = true; + } else if (cols[3].equals(db1Name) && cols[4].equals(table4Name) && cols[5].equals("p")) { + containsDb1Table4P = true; + } + } + Assert.assertTrue(containsDb1Table1Key && containsDb1Table1Value && containsDb1Table2Key + && containsDb1Table2Value && containsDb1Table3Key && containsDb1Table3Value + && containsDb1Table4Key && containsDb1Table4Value && containsDb1Table4P); + serviceClient.closeSession(sessHandle); + + } +} diff --git a/metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql b/metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql index 3444439..d4eab4e 100644 --- a/metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql +++ b/metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql @@ -1067,16 +1067,21 @@ CREATE VIEW IF NOT EXISTS `SCHEMATA` `DEFAULT_CHARACTER_SET_NAME`, `SQL_PATH` ) AS -SELECT +SELECT DISTINCT 'default', - `NAME`, - `OWNER_NAME`, + D.`NAME`, + D.`OWNER_NAME`, cast(null as string), cast(null as string), cast(null as string), `DB_LOCATION_URI` FROM - sys.DBS; + sys.DBS D, `sys`.`TBLS` T, `sys`.`TBL_PRIVS` P +WHERE + D.`DB_ID` = T.`DB_ID` + AND T.`TBL_ID` = P.`TBL_ID` + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR (array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP'); CREATE VIEW IF NOT EXISTS `TABLES` ( @@ -1093,7 +1098,7 @@ CREATE VIEW IF NOT EXISTS `TABLES` `IS_TYPED`, `COMMIT_ACTION` ) AS -SELECT +SELECT DISTINCT 'default', D.NAME, T.TBL_NAME, @@ -1107,9 +1112,13 @@ SELECT 'NO', cast(null as string) FROM - `sys`.`TBLS` T, `sys`.`DBS` D + `sys`.`TBLS` T, `sys`.`DBS` D, `sys`.`TBL_PRIVS` P WHERE - D.`DB_ID` = T.`DB_ID`; + D.`DB_ID` = T.`DB_ID` + AND T.`TBL_ID` = P.`TBL_ID` + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR (array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP') + AND P.`TBL_PRIV`='SELECT'; CREATE VIEW IF NOT EXISTS `TABLE_PRIVILEGES` ( @@ -1122,9 +1131,9 @@ CREATE VIEW IF NOT EXISTS `TABLE_PRIVILEGES` `IS_GRANTABLE`, `WITH_HIERARCHY` ) AS -SELECT - `GRANTOR`, - `PRINCIPAL_NAME`, +SELECT DISTINCT + P.`GRANTOR`, + P.`PRINCIPAL_NAME`, 'default', D.`NAME`, T.`TBL_NAME`, @@ -1134,10 +1143,15 @@ SELECT FROM sys.`TBL_PRIVS` P, sys.`TBLS` T, - sys.`DBS` D + sys.`DBS` D, + sys.`TBL_PRIVS` P2 WHERE - P.TBL_ID = T.TBL_ID - AND T.DB_ID = D.DB_ID; + P.`TBL_ID` = T.`TBL_ID` + AND T.`DB_ID` = D.`DB_ID` + AND P.`TBL_ID` = P2.`TBL_ID` AND P.`PRINCIPAL_NAME` = P2.`PRINCIPAL_NAME` AND P.`PRINCIPAL_TYPE` = P2.`PRINCIPAL_TYPE` + AND (P2.`PRINCIPAL_NAME`=current_user() AND P2.`PRINCIPAL_TYPE`='USER' + OR (array_contains(current_groups(), P2.`PRINCIPAL_NAME`) OR P2.`PRINCIPAL_NAME` = 
'public') AND P2.`PRINCIPAL_TYPE`='GROUP') + AND P2.`TBL_PRIV`='SELECT'; CREATE VIEW IF NOT EXISTS `COLUMNS` ( @@ -1189,7 +1203,7 @@ CREATE VIEW IF NOT EXISTS `COLUMNS` `DECLARED_NUMERIC_PRECISION`, `DECLARED_NUMERIC_SCALE` ) AS -SELECT +SELECT DISTINCT 'default', D.NAME, T.TBL_NAME, @@ -1281,11 +1295,17 @@ FROM sys.`COLUMNS_V2` C, sys.`SDS` S, sys.`TBLS` T, - sys.`DBS` D + sys.`DBS` D, + sys.`TBL_COL_PRIVS` P WHERE S.`SD_ID` = T.`SD_ID` AND T.`DB_ID` = D.`DB_ID` - AND C.`CD_ID` = S.`CD_ID`; + AND C.`CD_ID` = S.`CD_ID` + AND T.`TBL_ID` = P.`TBL_ID` + AND C.`COLUMN_NAME` = P.`COLUMN_NAME` + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR (array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP') + AND P.`TBL_COL_PRIV`='SELECT'; CREATE VIEW IF NOT EXISTS `COLUMN_PRIVILEGES` ( @@ -1298,27 +1318,29 @@ CREATE VIEW IF NOT EXISTS `COLUMN_PRIVILEGES` `PRIVILEGE_TYPE`, `IS_GRANTABLE` ) AS -SELECT - `GRANTOR`, - `PRINCIPAL_NAME`, +SELECT DISTINCT + P.`GRANTOR`, + P.`PRINCIPAL_NAME`, 'default', D.`NAME`, T.`TBL_NAME`, - C.`COLUMN_NAME`, + P.`COLUMN_NAME`, P.`TBL_COL_PRIV`, IF (P.`GRANT_OPTION` == 0, 'NO', 'YES') FROM sys.`TBL_COL_PRIVS` P, sys.`TBLS` T, sys.`DBS` D, - sys.`COLUMNS_V2` C, - sys.`SDS` S + sys.`SDS` S, + sys.`TBL_PRIVS` P2 WHERE S.`SD_ID` = T.`SD_ID` AND T.`DB_ID` = D.`DB_ID` AND P.`TBL_ID` = T.`TBL_ID` - AND P.`COLUMN_NAME` = C.`COLUMN_NAME` - AND C.`CD_ID` = S.`CD_ID`; + AND P.`TBL_ID` = P2.`TBL_ID` AND P.`PRINCIPAL_NAME` = P2.`PRINCIPAL_NAME` AND P.`PRINCIPAL_TYPE` = P2.`PRINCIPAL_TYPE` + AND (P2.`PRINCIPAL_NAME`=current_user() AND P2.`PRINCIPAL_TYPE`='USER' + OR (array_contains(current_groups(), P2.`PRINCIPAL_NAME`) OR P2.`PRINCIPAL_NAME` = 'public') AND P2.`PRINCIPAL_TYPE`='GROUP') + AND P2.`TBL_PRIV`='SELECT'; CREATE VIEW IF NOT EXISTS `VIEWS` ( @@ -1333,7 +1355,7 @@ CREATE VIEW IF NOT EXISTS `VIEWS` `IS_TRIGGER_DELETABLE`, `IS_TRIGGER_INSERTABLE_INTO` ) AS -SELECT +SELECT DISTINCT 'default', D.NAME, T.TBL_NAME, @@ -1346,7 +1368,12 @@ SELECT false FROM `sys`.`DBS` D, - `sys`.`TBLS` T + `sys`.`TBLS` T, + `sys`.`TBL_PRIVS` P WHERE - D.`DB_ID` = T.`DB_ID` AND - length(T.VIEW_ORIGINAL_TEXT) > 0; + D.`DB_ID` = T.`DB_ID` + AND length(T.VIEW_ORIGINAL_TEXT) > 0 + AND T.`TBL_ID` = P.`TBL_ID` + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR (array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP') + AND P.`TBL_PRIV`='SELECT'; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java index d59bf1f..ed599e0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java @@ -356,6 +356,7 @@ system.registerGenericUDF("current_date", GenericUDFCurrentDate.class); system.registerGenericUDF("current_timestamp", GenericUDFCurrentTimestamp.class); system.registerGenericUDF("current_user", GenericUDFCurrentUser.class); + system.registerGenericUDF("current_groups", GenericUDFCurrentGroups.class); system.registerGenericUDF("logged_in_user", GenericUDFLoggedInUser.class); system.registerGenericUDF("isnull", GenericUDFOPNull.class); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeSynchonizer.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeSynchonizer.java new file mode 100644 index 0000000..02b481a 
--- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeSynchonizer.java @@ -0,0 +1,201 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.security.authorization; + +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import org.apache.curator.framework.recipes.leader.LeaderLatch; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; +import org.apache.hadoop.hive.metastore.api.HiveObjectRef; +import org.apache.hadoop.hive.metastore.api.HiveObjectType; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveMetastoreClientFactoryImpl; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePolicyProvider; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveResourceACLs; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class PrivilegeSynchonizer implements Runnable { + + private static final Logger LOG = LoggerFactory.getLogger(PrivilegeSynchonizer.class); + public static final String GRANTOR = "ranger"; + private IMetaStoreClient hiveClient; + private HivePolicyProvider policyProvider; + private LeaderLatch privilegeSynchonizerLatch; + private HiveConf hiveConf; + + public PrivilegeSynchonizer(LeaderLatch privilegeSynchonizerLatch, HivePolicyProvider policyProvider, + HiveConf hiveConf) { + try { + hiveClient = new HiveMetastoreClientFactoryImpl().getHiveMetastoreClient(); + } catch (HiveAuthzPluginException e) { + throw new RuntimeException("Error creating HiveMetastoreClient", e); + } + this.privilegeSynchonizerLatch = privilegeSynchonizerLatch; + this.policyProvider = policyProvider; + this.hiveConf = hiveConf; + } + + private void addACLsToBag(Map<String, Map<HiveResourceACLs.Privilege, HiveResourceACLs.AccessResult>> principalAclsMap, PrivilegeBag privBag, + HiveObjectType objectType, String dbName, String tblName, String columnName, PrincipalType principalType) { + + for (Map.Entry<String, Map<HiveResourceACLs.Privilege, HiveResourceACLs.AccessResult>> principalAcls : principalAclsMap.entrySet()) { + String principal =
principalAcls.getKey(); + for (Map.Entry<HiveResourceACLs.Privilege, HiveResourceACLs.AccessResult> acl : principalAcls.getValue().entrySet()) { + if (acl.getValue() == HiveResourceACLs.AccessResult.ALLOWED) { + switch (objectType) { + case DATABASE: + privBag.addToPrivileges(new HiveObjectPrivilege(new HiveObjectRef( + HiveObjectType.DATABASE, dbName, null, null, null), principal, principalType, + new PrivilegeGrantInfo(acl.getKey().toString(), (int)(System.currentTimeMillis() / 1000), GRANTOR, PrincipalType.USER, false))); + break; + case TABLE: + privBag.addToPrivileges(new HiveObjectPrivilege(new HiveObjectRef( + HiveObjectType.TABLE, dbName, tblName, null, null), principal, principalType, + new PrivilegeGrantInfo(acl.getKey().toString(), (int)(System.currentTimeMillis() / 1000), GRANTOR, PrincipalType.USER, false))); + break; + case COLUMN: + privBag.addToPrivileges(new HiveObjectPrivilege(new HiveObjectRef( + HiveObjectType.COLUMN, dbName, tblName, null, columnName), principal, principalType, + new PrivilegeGrantInfo(acl.getKey().toString(), (int)(System.currentTimeMillis() / 1000), GRANTOR, PrincipalType.USER, false))); + break; + default: + throw new RuntimeException("Got unknown object type " + objectType); + } + } + } + } + } + + private void revokePrivileges(HiveObjectType type, String dbName, String tblName) throws Exception { + PrivilegeBag privBag = new PrivilegeBag(); + switch (type) { + case DATABASE: + privBag.addToPrivileges(new HiveObjectPrivilege(new HiveObjectRef( + HiveObjectType.DATABASE, dbName, null, null, null), null, null, + new PrivilegeGrantInfo("*", 0, null, null, false))); + break; + case TABLE: + privBag.addToPrivileges(new HiveObjectPrivilege(new HiveObjectRef( + HiveObjectType.TABLE, dbName, tblName, null, null), null, null, + new PrivilegeGrantInfo("*", 0, null, null, false))); + break; + case COLUMN: + privBag.addToPrivileges(new HiveObjectPrivilege(new HiveObjectRef( + HiveObjectType.COLUMN, dbName, tblName, null, null), null, null, + new PrivilegeGrantInfo("*", 0, null, null, false))); + break; + default: + throw new RuntimeException("Got unknown object type " + type); + } + hiveClient.revoke_privileges(privBag, false); + } + + private void fetchPrivilege(HiveObjectType type, String dbName, String tblName, String columnName) throws Exception { + PrivilegeBag privBag = new PrivilegeBag(); + + HiveResourceACLs objectAcls = null; + + switch (type) { + case DATABASE: + objectAcls = policyProvider.getResourceACLs(new HivePrivilegeObject(HivePrivilegeObjectType.DATABASE, dbName, null)); + break; + + case TABLE: + objectAcls = policyProvider.getResourceACLs(new HivePrivilegeObject(HivePrivilegeObjectType.TABLE_OR_VIEW, dbName, tblName)); + break; + + case COLUMN: + objectAcls = policyProvider.getResourceACLs(new HivePrivilegeObject(HivePrivilegeObjectType.COLUMN, dbName, tblName, null, columnName)); + break; + + default: + throw new RuntimeException("Got unknown object type " + type); + } + + if (objectAcls == null) { + return; + } + + addACLsToBag(objectAcls.getUserPermissions(), privBag, type, dbName, tblName, columnName, PrincipalType.USER); + addACLsToBag(objectAcls.getGroupPermissions(), privBag, type, dbName, tblName, columnName, PrincipalType.GROUP); + + if (privBag.isSetPrivileges()) { + try { + hiveClient.grant_privileges(privBag); + } catch (Exception e) { + // It is possible privileges are granted after PrivilegeSynchonizer revokes + // all privileges, thus resulting in an InvalidObjectException of "xxx is already granted".
+ // We catch such exceptions here and do not let them fail the synchronization + if (!(e.getCause() instanceof InvalidObjectException)) { + throw e; + } + } + } + } + + @Override + public void run() { + while (true) { + try { + long interval = HiveConf.getTimeVar(hiveConf, ConfVars.HIVE_SERVER2_PRIVILEGE_SYNCHRONIZER_INTERVAL, + TimeUnit.SECONDS); + if (hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_PRIVILEGE_SYNCHRONIZER)) { + if (!privilegeSynchonizerLatch.await(interval, TimeUnit.SECONDS)) { + continue; + } + LOG.debug("Starting privilege synchronization"); + for (String dbName : hiveClient.getAllDatabases()) { + revokePrivileges(HiveObjectType.DATABASE, dbName, null); + fetchPrivilege(HiveObjectType.DATABASE, dbName, null, null); + + for (String tblName : hiveClient.getAllTables(dbName)) { + revokePrivileges(HiveObjectType.TABLE, dbName, tblName); + revokePrivileges(HiveObjectType.COLUMN, dbName, tblName); + fetchPrivilege(HiveObjectType.TABLE, dbName, tblName, null); + Table tbl = hiveClient.getTable(dbName, tblName); + for (FieldSchema fs : tbl.getPartitionKeys()) { + fetchPrivilege(HiveObjectType.COLUMN, dbName, tblName, fs.getName()); + } + for (FieldSchema fs : tbl.getSd().getCols()) { + fetchPrivilege(HiveObjectType.COLUMN, dbName, tblName, fs.getName()); + } + } + } + } + // Sleep if no exception happens; otherwise retry immediately + Thread.sleep(interval * 1000); + LOG.debug("Privilege synchronization finished successfully"); + } catch (Exception e) { + LOG.error("Error running PrivilegeSynchonizer: " + e.getMessage(), e); + } + } + } +} \ No newline at end of file diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveResourceACLs.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveResourceACLs.java index 53e221f..5edb8f9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveResourceACLs.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveResourceACLs.java @@ -17,34 +17,60 @@ */ package org.apache.hadoop.hive.ql.security.authorization.plugin; +import java.util.HashMap; import java.util.Map; /** * Captures authorization policy information on a {@link HivePrivilegeObject}. */ -public interface HiveResourceACLs { +public class HiveResourceACLs { /** * Privilege types. */ - enum Privilege { + public static enum Privilege { SELECT, UPDATE, CREATE, DROP, ALTER, INDEX, LOCK, READ, WRITE }; /** * Privilege access result.
*/ - enum AccessResult { + public static enum AccessResult { ALLOWED, NOT_ALLOWED, CONDITIONAL_ALLOWED }; + Map<String, Map<Privilege, AccessResult>> userPermissions = new HashMap<String, Map<Privilege, AccessResult>>(); + Map<String, Map<Privilege, AccessResult>> groupPermissions = new HashMap<String, Map<Privilege, AccessResult>>(); /** * @return Returns mapping of user name to privilege-access result pairs */ - Map<String, Map<Privilege, AccessResult>> getUserPermissions(); + public Map<String, Map<Privilege, AccessResult>> getUserPermissions() { + return userPermissions; + } /** * @return Returns mapping of group name to privilege-access result pairs */ - Map<String, Map<Privilege, AccessResult>> getGroupPermissions(); + public Map<String, Map<Privilege, AccessResult>> getGroupPermissions() { + return groupPermissions; + } + public void addUserEntry(String user, Privilege priv, AccessResult result) { + if (userPermissions.containsKey(user)) { + userPermissions.get(user).put(priv, result); + } else { + Map<Privilege, AccessResult> entry = new HashMap<Privilege, AccessResult>(); + entry.put(priv, result); + userPermissions.put(user, entry); + } + } + + public void addGroupEntry(String group, Privilege priv, AccessResult result) { + if (groupPermissions.containsKey(group)) { + groupPermissions.get(group).put(priv, result); + } else { + Map<Privilege, AccessResult> entry = new HashMap<Privilege, AccessResult>(); + entry.put(priv, result); + groupPermissions.put(group, entry); + } + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java index 6003ced..cd242f5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java @@ -1247,6 +1247,13 @@ public static String getUserFromAuthenticator() { return null; } + public static List<String> getGroupsFromAuthenticator() { + if (SessionState.get() != null && SessionState.get().getAuthenticator() != null) { + return SessionState.get().getAuthenticator().getGroupNames(); + } + return null; + } + static void validateFiles(List<String> newFiles) throws IllegalArgumentException { SessionState ss = SessionState.get(); Configuration conf = (ss == null) ? new Configuration() : ss.getConf(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCurrentGroups.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCurrentGroups.java new file mode 100644 index 0000000..0ebba1a --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCurrentGroups.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.udf.generic; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hive.ql.exec.Description; +import org.apache.hadoop.hive.ql.exec.UDFArgumentException; +import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.ql.udf.UDFType; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.io.Text; + +// This function is not a deterministic function, but a runtime constant. +// The return value is constant within a query but can be different between queries. +@UDFType(deterministic = false, runtimeConstant = true) +@Description(name = "current_groups", value = "_FUNC_() - Returns current group names", extended = "Groups are obtained from the SessionState authenticator") +public class GenericUDFCurrentGroups extends GenericUDF { + protected List<Text> currentGroups; + + @Override + public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { + if (arguments.length != 0) { + throw new UDFArgumentLengthException( + "The function CURRENT_GROUPS does not take any arguments, but found " + arguments.length); + } + + if (currentGroups == null) { + List<String> sessGroupsFromAuth = SessionState.getGroupsFromAuthenticator(); + if (sessGroupsFromAuth != null) { + currentGroups = new ArrayList<Text>(); + for (String group : sessGroupsFromAuth) { + currentGroups.add(new Text(group)); + } + } + } + + return ObjectInspectorFactory.getStandardListObjectInspector(PrimitiveObjectInspectorFactory.writableStringObjectInspector); + } + + @Override + public Object evaluate(DeferredObject[] arguments) throws HiveException { + return currentGroups; + } + + public List<Text> getCurrentGroups() { + return currentGroups; + } + + public void setCurrentGroups(List<Text> currentGroups) { + this.currentGroups = currentGroups; + } + + @Override + public String getDisplayString(String[] children) { + return "CURRENT_GROUPS()"; + } + + @Override + public void copyToNewInstance(Object newInstance) throws UDFArgumentException { + super.copyToNewInstance(newInstance); + // Need to preserve currentGroups + GenericUDFCurrentGroups other = (GenericUDFCurrentGroups) newInstance; + if (currentGroups != null) { + other.currentGroups = new ArrayList<Text>(); + for (Text group : currentGroups) { + other.currentGroups.add(new Text(group)); + } + } + } +} \ No newline at end of file diff --git a/service/src/java/org/apache/hive/service/server/HiveServer2.java b/service/src/java/org/apache/hive/service/server/HiveServer2.java index 6308c5c..89dd4fe 100644 --- a/service/src/java/org/apache/hive/service/server/HiveServer2.java +++ b/service/src/java/org/apache/hive/service/server/HiveServer2.java @@ -56,6 +56,7 @@ import org.apache.curator.framework.api.BackgroundCallback; import org.apache.curator.framework.api.CuratorEvent; import org.apache.curator.framework.api.CuratorEventType; +import org.apache.curator.framework.recipes.leader.LeaderLatch; import org.apache.curator.framework.recipes.leader.LeaderLatchListener; import org.apache.curator.framework.recipes.nodes.PersistentEphemeralNode; import org.apache.curator.retry.ExponentialBackoffRetry; @@ -80,6 +81,9 @@ import
org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry; +import org.apache.hadoop.hive.ql.security.authorization.PrivilegeSynchonizer; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizer; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePolicyProvider; import org.apache.hadoop.hive.ql.session.ClearDanglingScratchDir; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.util.ZooKeeperHiveHelper; @@ -653,6 +657,14 @@ public synchronized void start() { throw new ServiceException(e); } } + + try { + startPrivilegeSynchonizer(hiveConf); + } catch (Exception e) { + LOG.error("Error starting privilege synchronizer: ", e); + throw new ServiceException(e); + } + if (webServer != null) { try { webServer.start(); @@ -877,6 +889,33 @@ public static void scheduleClearDanglingScratchDir(HiveConf hiveConf, int initia } } + public void startPrivilegeSynchonizer(HiveConf hiveConf) throws Exception { + // startPrivilegeSynchonizer depends on the zookeeper client set up in addServerInstanceToZooKeeper, + // so hive.server2.support.dynamic.service.discovery must be true + if (hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_PRIVILEGE_SYNCHRONIZER)) { + if (!hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY)) { + LOG.warn("Cannot start PrivilegeSynchonizer since hive.server2.support.dynamic.service.discovery=false"); + return; + } + String rootNamespace = hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_ZOOKEEPER_NAMESPACE); + String path = ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace + + ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + "leader"; + LeaderLatch privilegeSynchonizerLatch = new LeaderLatch(zooKeeperClient, path); + privilegeSynchonizerLatch.start(); + HiveAuthorizer authorizer = SessionState.get().getAuthorizerV2(); + HivePolicyProvider policyProvider = authorizer.getHivePolicyProvider(); + if (policyProvider == null) { + LOG.warn("Cannot start PrivilegeSynchonizer, policyProvider of " + authorizer.getClass().getName() + + " is null"); + privilegeSynchonizerLatch.close(); + return; + } + Thread privilegeSynchonizerThread = new Thread(new PrivilegeSynchonizer(privilegeSynchonizerLatch, + policyProvider, hiveConf)); + privilegeSynchonizerThread.start(); + } + } + private static void startHiveServer2() throws Throwable { long attempts = 0, maxAttempts = 1; while (true) { diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 2056930..8188004 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -5861,23 +5861,35 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) if (dbObj != null) { String db = hiveObject.getDbName(); boolean found = false; - List<MDBPrivilege> dbGrants = this.listPrincipalMDBGrants( - userName, principalType, catName, db); + List<MDBPrivilege> dbGrants = null; + if (userName != null) { + dbGrants = this.listPrincipalMDBGrants( + userName, principalType, catName, db); + } else { + dbGrants = this.listDatabaseGrants(catName, db, new QueryWrapper()); + } for (String privilege : privs) { - for (MDBPrivilege dbGrant : dbGrants) { - String dbGrantPriv = dbGrant.getPrivilege(); - if
(privilege.equals(dbGrantPriv)) { - found = true; - if (grantOption) { - if (dbGrant.getGrantOption()) { - dbGrant.setGrantOption(false); - } else { - throw new MetaException("User " + userName - + " does not have grant option with privilege " + privilege); + if (privilege.equalsIgnoreCase("*")) { + found = true; + for (MDBPrivilege dbGrant : dbGrants) { + persistentObjs.add(dbGrant); + } + } else { + for (MDBPrivilege dbGrant : dbGrants) { + String dbGrantPriv = dbGrant.getPrivilege(); + if (privilege.equals(dbGrantPriv)) { + found = true; + if (grantOption) { + if (dbGrant.getGrantOption()) { + dbGrant.setGrantOption(false); + } else { + throw new MetaException("User " + userName + + " does not have grant option with privilege " + privilege); + } } + persistentObjs.add(dbGrant); + break; } - persistentObjs.add(dbGrant); - break; } } if (!found) { @@ -5889,24 +5901,37 @@ } } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) { boolean found = false; - List<MTablePrivilege> tableGrants = this + List<MTablePrivilege> tableGrants = null; + if (userName != null) { + tableGrants = this .listAllMTableGrants(userName, principalType, catName, hiveObject.getDbName(), hiveObject.getObjectName()); + } else { + tableGrants = this + .listMTableGrantsAll(catName, hiveObject.getDbName(), hiveObject.getObjectName()); + } for (String privilege : privs) { - for (MTablePrivilege tabGrant : tableGrants) { - String tableGrantPriv = tabGrant.getPrivilege(); - if (privilege.equalsIgnoreCase(tableGrantPriv)) { - found = true; - if (grantOption) { - if (tabGrant.getGrantOption()) { - tabGrant.setGrantOption(false); - } else { - throw new MetaException("User " + userName - + " does not have grant option with privilege " + privilege); + if (privilege.equalsIgnoreCase("*")) { + found = true; + for (MTablePrivilege tabGrant : tableGrants) { + persistentObjs.add(tabGrant); + } + } else { + for (MTablePrivilege tabGrant : tableGrants) { + String tableGrantPriv = tabGrant.getPrivilege(); + if (privilege.equalsIgnoreCase(tableGrantPriv)) { + found = true; + if (grantOption) { + if (tabGrant.getGrantOption()) { + tabGrant.setGrantOption(false); + } else { + throw new MetaException("User " + userName + + " does not have grant option with privilege " + privilege); + } } + persistentObjs.add(tabGrant); + break; } - persistentObjs.add(tabGrant); - break; } } if (!found) { @@ -5992,26 +6017,47 @@ } } } else { - List<MTableColumnPrivilege> mSecCol = listPrincipalMTableColumnGrants( - userName, principalType, catName, hiveObject.getDbName(), hiveObject - .getObjectName(), hiveObject.getColumnName()); + List<MTableColumnPrivilege> mSecCol = null; + if (hiveObject.getColumnName() != null) { + if (userName != null) { + mSecCol = listPrincipalMTableColumnGrants( + userName, principalType, catName, hiveObject.getDbName(), hiveObject + .getObjectName(), hiveObject.getColumnName()); + } else { + mSecCol = listMTableColumnGrantsAll( + catName, hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject.getColumnName()); + } + } else { + if (userName != null) { + mSecCol = listTableAllColumnGrants(catName, hiveObject.getDbName(), hiveObject.getObjectName()); + } else { + mSecCol = listPrincipalMTableColumnGrantsAll(catName, hiveObject.getDbName(), hiveObject.getObjectName()); + } + } boolean found = false; if (mSecCol != null) { for (String privilege : privs) { - for (MTableColumnPrivilege col : mSecCol) { - String colPriv = col.getPrivilege(); - if
(colPriv.equalsIgnoreCase(privilege)) { - found = true; - if (grantOption) { - if (col.getGrantOption()) { - col.setGrantOption(false); - } else { - throw new MetaException("User " + userName - + " does not have grant option with privilege " + privilege); + if (privilege.equalsIgnoreCase("*")) { + found = true; + for (MTableColumnPrivilege col : mSecCol) { + persistentObjs.add(col); + } + } else { + for (MTableColumnPrivilege col : mSecCol) { + String colPriv = col.getPrivilege(); + if (colPriv.equalsIgnoreCase(privilege)) { + found = true; + if (grantOption) { + if (col.getGrantOption()) { + col.setGrantOption(false); + } else { + throw new MetaException("User " + userName + + " does not have grant option with privilege " + privilege); + } } + persistentObjs.add(col); + break; } - persistentObjs.add(col); - break; } } if (!found) { @@ -6953,32 +6999,38 @@ private void dropPartitionGrantsNoTxn(String catName, String dbName, String tabl } } - @Override - public List<HiveObjectPrivilege> listTableGrantsAll(String catName, String dbName, String tableName) { + private List<MTablePrivilege> listMTableGrantsAll(String catName, String dbName, String tableName) { boolean success = false; Query query = null; dbName = normalizeIdentifier(dbName); tableName = normalizeIdentifier(tableName); try { openTransaction(); - LOG.debug("Executing listTableGrantsAll"); + LOG.debug("Executing listMTableGrantsAll"); query = pm.newQuery(MTablePrivilege.class, "table.tableName == t1 && table.database.name == t2 && table.database.catalogName == t3"); query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); List<MTablePrivilege> mSecurityTabPartList = (List<MTablePrivilege>) query.executeWithArray(tableName, dbName, catName); - LOG.debug("Done executing query for listTableGrantsAll"); + LOG.debug("Done executing query for listMTableGrantsAll"); pm.retrieveAll(mSecurityTabPartList); - List<HiveObjectPrivilege> result = convertTable(mSecurityTabPartList); success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPrincipalAllTableGrants"); - return result; + LOG.debug("Done retrieving all objects for listMTableGrantsAll"); + return mSecurityTabPartList; } finally { - rollbackAndCleanup(success, query); + if (!success) { + rollbackAndCleanup(success, query); + } } } + @Override + public List<HiveObjectPrivilege> listTableGrantsAll(String catName, String dbName, String tableName) { + List<MTablePrivilege> mSecurityTabPartList = listMTableGrantsAll(catName, dbName, tableName); + return convertTable(mSecurityTabPartList); + } + private List<HiveObjectPrivilege> convertTable(List<MTablePrivilege> privs) { List<HiveObjectPrivilege> result = new ArrayList<>(); for (MTablePrivilege priv : privs) { @@ -7158,16 +7210,44 @@ private void dropPartitionGrantsNoTxn(String catName, String dbName, String tabl } } - @Override - public List<HiveObjectPrivilege> listTableColumnGrantsAll(String catName, String dbName, String tableName, - String columnName) { + private List<MTableColumnPrivilege> listPrincipalMTableColumnGrantsAll(String catName, String dbName, + String tableName) { boolean success = false; Query query = null; dbName = normalizeIdentifier(dbName); tableName = normalizeIdentifier(tableName); try { openTransaction(); - LOG.debug("Executing listPrincipalTableColumnGrantsAll"); + LOG.debug("Executing listPrincipalMTableColumnGrantsAll"); + query = + pm.newQuery(MTableColumnPrivilege.class, + "table.tableName == t3 && table.database.name == t4 && " + + "table.database.catalogName == t5"); + query.declareParameters("java.lang.String t3, java.lang.String t4, java.lang.String t5"); + List<MTableColumnPrivilege> mSecurityTabPartList = + (List<MTableColumnPrivilege>) query.executeWithArray(tableName, dbName, + catName); + LOG.debug("Done executing query
for listPrincipalMTableColumnGrantsAll"); + pm.retrieveAll(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listPrincipalMTableColumnGrantsAll"); + return mSecurityTabPartList; + } finally { + if (!success) { + rollbackAndCleanup(success, query); + } + } + } + + private List listMTableColumnGrantsAll(String catName, String dbName, + String tableName, String columnName) { + boolean success = false; + Query query = null; + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + try { + openTransaction(); + LOG.debug("Executing listMTableColumnGrantsAll"); query = pm.newQuery(MTableColumnPrivilege.class, "table.tableName == t3 && table.database.name == t4 && " + @@ -7177,17 +7257,26 @@ private void dropPartitionGrantsNoTxn(String catName, String dbName, String tabl List mSecurityTabPartList = (List) query.executeWithArray(tableName, dbName, catName, columnName); - LOG.debug("Done executing query for listPrincipalTableColumnGrantsAll"); + LOG.debug("Done executing query for listMTableColumnGrantsAll"); pm.retrieveAll(mSecurityTabPartList); - List result = convertTableCols(mSecurityTabPartList); success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPrincipalTableColumnGrantsAll"); - return result; + LOG.debug("Done retrieving all objects for listMTableColumnGrantsAll"); + return mSecurityTabPartList; } finally { - rollbackAndCleanup(success, query); + if (!success) { + rollbackAndCleanup(success, query); + } } } + @Override + public List listTableColumnGrantsAll(String catName, String dbName, String tableName, + String columnName) { + List mSecurityTabPartList = listMTableColumnGrantsAll(catName, + dbName, tableName, columnName); + return convertTableCols(mSecurityTabPartList); + } + private List convertTableCols(List privs) { List result = new ArrayList<>(); for (MTableColumnPrivilege priv : privs) {