Index: conf/hive-default.xml.template
===================================================================
--- conf/hive-default.xml.template (revision 1418565)
+++ conf/hive-default.xml.template (working copy)
@@ -1258,6 +1258,14 @@
+<property>
+  <name>hive.security.metastore.authorization.manager</name>
+  <value>org.apache.hadoop.hive.ql.security.authorization.DefaultHiveMetastoreAuthorizationProvider</value>
+  <description>authorization manager class name to be used in the metastore for authorization.
+  The user defined authorization class should implement interface
+  org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.
+  </description>
+</property>
 <property>
   <name>hive.security.authenticator.manager</name>
   <value>org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator</value>
   <description>hive client authenticator manager class name.
@@ -1265,6 +1273,13 @@
+<property>
+  <name>hive.security.metastore.authenticator.manager</name>
+  <value>org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator</value>
+  <description>authenticator manager class name to be used in the metastore for authentication.
+  The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.
+  </description>
+</property>
 <property>
   <name>hive.security.authorization.createtable.user.grants</name>
   <value></value>
   <description>the privileges automatically granted to some users whenever a table gets created.
Index: metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (revision 1418565)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (working copy)
@@ -68,6 +68,8 @@
public static final String DEFAULT_DATABASE_NAME = "default";
public static final String DEFAULT_DATABASE_COMMENT = "Default Hive database";
+ public static final String DATABASE_WAREHOUSE_SUFFIX = ".db";
+
/**
* printStackTrace
*
@@ -1076,4 +1078,5 @@
throw new RuntimeException("Unable to instantiate " + theClass.getName(), e);
}
}
+
}
Index: metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (revision 1418565)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (working copy)
@@ -18,6 +18,7 @@
package org.apache.hadoop.hive.metastore;
+import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DATABASE_WAREHOUSE_SUFFIX;
import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
import java.io.FileNotFoundException;
@@ -164,6 +165,14 @@
return new Path(db.getLocationUri());
}
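+ /**
+  * Returns the default filesystem location for a database: the warehouse
+  * root itself for the default database, and a "<dbname>.db" directory
+  * under the warehouse root for any other database.
+  */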
+ public Path getDefaultDatabasePath(String dbName) throws MetaException {
+ if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
+ return getWhRoot();
+ }
+ return new Path(getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX);
+ }
+
+
public Path getTablePath(Database db, String tableName)
throws MetaException {
return getDnsPath(new Path(getDatabasePath(db), tableName.toLowerCase()));
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (revision 1418565)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (working copy)
@@ -59,7 +59,6 @@
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -87,6 +86,7 @@
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
@@ -365,6 +365,10 @@
return conf;
}
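+ /**
+  * Exposes the Warehouse instance, e.g. so that metastore-side event
+  * listeners and tests can resolve default database and table paths.
+  */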
+ public Warehouse getWh() {
+ return wh;
+ }
+
/**
* Get a cached RawStore.
*
@@ -397,7 +401,7 @@
} catch (NoSuchObjectException e) {
ms.createDatabase(
new Database(DEFAULT_DATABASE_NAME, DEFAULT_DATABASE_COMMENT,
- getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString(), null));
+ wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString(), null));
}
HMSHandler.createDefaultDB = true;
}
@@ -515,22 +519,13 @@
return counters;
}
- private static final String DATABASE_WAREHOUSE_SUFFIX = ".db";
-
- private Path getDefaultDatabasePath(String dbName) throws MetaException {
- if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
- return wh.getWhRoot();
- }
- return new Path(wh.getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX);
- }
-
private void create_database_core(RawStore ms, final Database db)
throws AlreadyExistsException, InvalidObjectException, MetaException {
if (!validateName(db.getName())) {
throw new InvalidObjectException(db.getName() + " is not a valid database name");
}
if (null == db.getLocationUri()) {
- db.setLocationUri(getDefaultDatabasePath(db.getName()).toString());
+ db.setLocationUri(wh.getDefaultDatabasePath(db.getName()).toString());
} else {
db.setLocationUri(wh.getDnsPath(new Path(db.getLocationUri())).toString());
}
Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
===================================================================
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (revision 1418565)
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (working copy)
@@ -612,6 +612,11 @@
"org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider"),
HIVE_AUTHENTICATOR_MANAGER("hive.security.authenticator.manager",
"org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator"),
+ HIVE_METASTORE_AUTHORIZATION_MANAGER("hive.security.metastore.authorization.manager",
+ "org.apache.hadoop.hive.ql.security.authorization."
+ + "DefaultHiveMetastoreAuthorizationProvider"),
+ HIVE_METASTORE_AUTHENTICATOR_MANAGER("hive.security.metastore.authenticator.manager",
+ "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator"),
HIVE_AUTHORIZATION_TABLE_USER_GRANTS("hive.security.authorization.createtable.user.grants", ""),
HIVE_AUTHORIZATION_TABLE_GROUP_GRANTS("hive.security.authorization.createtable.group.grants",
""),
Index: ql/src/test/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java
===================================================================
--- ql/src/test/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java (revision 0)
+++ ql/src/test/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java (revision 0)
@@ -0,0 +1,303 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.security;
+
+import java.io.IOException;
+import java.net.ServerSocket;
+import java.util.ArrayList;
+import java.util.List;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.hive.cli.CliSessionState;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.security.DummyHiveMetastoreAuthorizationProvider.AuthCallContext;
+import org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.shims.ShimLoader;
+
+/**
+ * TestAuthorizationPreEventListener. Test case for
+ * {@link org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener} and
+ * {@link org.apache.hadoop.hive.metastore.MetaStorePreEventListener}
+ */
+public class TestAuthorizationPreEventListener extends TestCase {
+ private HiveConf clientHiveConf;
+ private HiveMetaStoreClient msc;
+ private Driver driver;
+
+ @Override
+ protected void setUp() throws Exception {
+
+ super.setUp();
+
+ int port = MetaStoreUtils.findFreePort();
+
+ System.setProperty(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname,
+ AuthorizationPreEventListener.class.getName());
+ System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_MANAGER.varname,
+ DummyHiveMetastoreAuthorizationProvider.class.getName());
+ System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHENTICATOR_MANAGER.varname,
+ HadoopDefaultMetastoreAuthenticator.class.getName());
+
+ MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
+
+ clientHiveConf = new HiveConf(this.getClass());
+
+ clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
+ clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
+ clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+
+ clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+ clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+
+
+ SessionState.start(new CliSessionState(clientHiveConf));
+ msc = new HiveMetaStoreClient(clientHiveConf, null);
+ driver = new Driver(clientHiveConf);
+ }
+
+ private static String getFreeAvailablePort() throws IOException {
+ ServerSocket socket = new ServerSocket(0);
+ socket.setReuseAddress(true);
+ int port = socket.getLocalPort();
+ socket.close();
+ return "" + port;
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ super.tearDown();
+ }
+
+ private void validateCreateDb(Database expectedDb, Database actualDb) {
+ assertEquals(expectedDb.getName(), actualDb.getName());
+ assertEquals(expectedDb.getLocationUri(), actualDb.getLocationUri());
+ }
+
+ private void validateTable(Table expectedTable, Table actualTable) {
+ assertEquals(expectedTable.getTableName(), actualTable.getTableName());
+ assertEquals(expectedTable.getDbName(), actualTable.getDbName());
+
+ // We won't try to be too strict in checking this because we're comparing
+ // table create intents with observed tables created.
+ // If it does have a location though, we will compare, as with external tables
+ if ((actualTable.getSd() != null) && (actualTable.getSd().getLocation() != null)){
+ assertEquals(expectedTable.getSd().getLocation(), actualTable.getSd().getLocation());
+ }
+ }
+
+ private void validateCreateTable(Table expectedTable, Table actualTable) {
+ validateTable(expectedTable, actualTable);
+ }
+
+ private void validateAddPartition(Partition expectedPartition, Partition actualPartition) {
+ validatePartition(expectedPartition,actualPartition);
+ }
+
+ private void validatePartition(Partition expectedPartition, Partition actualPartition) {
+ assertEquals(expectedPartition.getValues(),
+ actualPartition.getValues());
+ assertEquals(expectedPartition.getDbName(),
+ actualPartition.getDbName());
+ assertEquals(expectedPartition.getTableName(),
+ actualPartition.getTableName());
+
+ // assertEquals(expectedPartition.getSd().getLocation(),
+ // actualPartition.getSd().getLocation());
+ // we don't compare locations, because the location can still be empty in
+ // the pre-event listener before it is created.
+
+ assertEquals(expectedPartition.getSd().getInputFormat(),
+ actualPartition.getSd().getInputFormat());
+ assertEquals(expectedPartition.getSd().getOutputFormat(),
+ actualPartition.getSd().getOutputFormat());
+ assertEquals(expectedPartition.getSd().getSerdeInfo(),
+ actualPartition.getSd().getSerdeInfo());
+
+ }
+
+ private void validateAlterPartition(Partition expectedOldPartition,
+ Partition expectedNewPartition, String actualOldPartitionDbName,
+ String actualOldPartitionTblName, List<String> actualOldPartitionValues,
+ Partition actualNewPartition) {
+ assertEquals(expectedOldPartition.getValues(), actualOldPartitionValues);
+ assertEquals(expectedOldPartition.getDbName(), actualOldPartitionDbName);
+ assertEquals(expectedOldPartition.getTableName(), actualOldPartitionTblName);
+
+ validatePartition(expectedNewPartition, actualNewPartition);
+ }
+
+ private void validateAlterTable(Table expectedOldTable, Table expectedNewTable,
+ Table actualOldTable, Table actualNewTable) {
+ validateTable(expectedOldTable, actualOldTable);
+ validateTable(expectedNewTable, actualNewTable);
+ }
+
+ private void validateDropPartition(Partition expectedPartition, Partition actualPartition) {
+ validatePartition(expectedPartition, actualPartition);
+ }
+
+ private void validateDropTable(Table expectedTable, Table actualTable) {
+ validateTable(expectedTable, actualTable);
+ }
+
+ private void validateDropDb(Database expectedDb, Database actualDb) {
+ assertEquals(expectedDb, actualDb);
+ }
+
+ public void testListener() throws Exception {
+ String dbName = "tmpdb";
+ String tblName = "tmptbl";
+ String renamed = "tmptbl2";
+ int listSize = 0;
+
+ List<AuthCallContext> authCalls = DummyHiveMetastoreAuthorizationProvider.authCalls;
+ assertEquals(listSize, authCalls.size());
+
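+ // Every metastore write below should append exactly one entry to the
+ // shared authCalls list; listSize tracks the expected size, which
+ // assertAndExtractSingleObjectFromEvent checks after each operation.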
+ driver.run("create database " + dbName);
+ listSize++;
+ Database db = msc.getDatabase(dbName);
+
+ Database dbFromEvent = (Database)assertAndExtractSingleObjectFromEvent(listSize, authCalls,
+ DummyHiveMetastoreAuthorizationProvider.AuthCallContextType.DB);
+ validateCreateDb(db,dbFromEvent);
+
+ driver.run("use " + dbName);
+ driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName));
+ listSize++;
+ Table tbl = msc.getTable(dbName, tblName);
+
+ Table tblFromEvent = (
+ (org.apache.hadoop.hive.ql.metadata.Table)
+ assertAndExtractSingleObjectFromEvent(listSize, authCalls,
+ DummyHiveMetastoreAuthorizationProvider.AuthCallContextType.TABLE))
+ .getTTable();
+ validateCreateTable(tbl, tblFromEvent);
+
+ driver.run("alter table tmptbl add partition (b='2011')");
+ listSize++;
+ Partition part = msc.getPartition("tmpdb", "tmptbl", "b=2011");
+
+ Partition ptnFromEvent = (
+ (org.apache.hadoop.hive.ql.metadata.Partition)
+ assertAndExtractSingleObjectFromEvent(listSize, authCalls,
+ DummyHiveMetastoreAuthorizationProvider.AuthCallContextType.PARTITION))
+ .getTPartition();
+ validateAddPartition(part,ptnFromEvent);
+
+ driver.run(String.format("alter table %s touch partition (%s)", tblName, "b='2011'"));
+ listSize++;
+
+ // the partition did not change,
+ // so the new partition should be similar to the original partition
+ Partition modifiedP = msc.getPartition(dbName, tblName, "b=2011");
+
+ Partition ptnFromEventAfterAlter = (
+ (org.apache.hadoop.hive.ql.metadata.Partition)
+ assertAndExtractSingleObjectFromEvent(listSize, authCalls,
+ DummyHiveMetastoreAuthorizationProvider.AuthCallContextType.PARTITION))
+ .getTPartition();
+
+ validateAlterPartition(part, modifiedP, ptnFromEventAfterAlter.getDbName(),
+ ptnFromEventAfterAlter.getTableName(), ptnFromEventAfterAlter.getValues(),
+ ptnFromEventAfterAlter);
+
+
+ List<String> part_vals = new ArrayList<String>();
+ part_vals.add("c=2012");
+ Partition newPart = msc.appendPartition(dbName, tblName, part_vals);
+
+ listSize++;
+
+ Partition newPtnFromEvent = (
+ (org.apache.hadoop.hive.ql.metadata.Partition)
+ assertAndExtractSingleObjectFromEvent(listSize, authCalls,
+ DummyHiveMetastoreAuthorizationProvider.AuthCallContextType.PARTITION))
+ .getTPartition();
+ validateAddPartition(newPart,newPtnFromEvent);
+
+
+ driver.run(String.format("alter table %s rename to %s", tblName, renamed));
+ listSize++;
+
+ Table renamedTable = msc.getTable(dbName, renamed);
+ Table renamedTableFromEvent = (
+ (org.apache.hadoop.hive.ql.metadata.Table)
+ assertAndExtractSingleObjectFromEvent(listSize, authCalls,
+ DummyHiveMetastoreAuthorizationProvider.AuthCallContextType.TABLE))
+ .getTTable();
+
+ validateAlterTable(tbl, renamedTable, renamedTableFromEvent,
+ renamedTable);
+ assertFalse(tbl.getTableName().equals(renamedTable.getTableName()));
+
+
+ //change the table name back
+ driver.run(String.format("alter table %s rename to %s", renamed, tblName));
+ listSize++;
+
+ driver.run(String.format("alter table %s drop partition (b='2011')", tblName));
+ listSize++;
+
+ Partition ptnFromDropPartition = (
+ (org.apache.hadoop.hive.ql.metadata.Partition)
+ assertAndExtractSingleObjectFromEvent(listSize, authCalls,
+ DummyHiveMetastoreAuthorizationProvider.AuthCallContextType.PARTITION))
+ .getTPartition();
+
+ validateDropPartition(modifiedP, ptnFromDropPartition);
+
+ driver.run("drop table " + tblName);
+ listSize++;
+ Table tableFromDropTableEvent = (
+ (org.apache.hadoop.hive.ql.metadata.Table)
+ assertAndExtractSingleObjectFromEvent(listSize, authCalls,
+ DummyHiveMetastoreAuthorizationProvider.AuthCallContextType.TABLE))
+ .getTTable();
+
+
+ validateDropTable(tbl, tableFromDropTableEvent);
+
+ driver.run("drop database " + dbName);
+ listSize++;
+ Database dbFromDropDatabaseEvent =
+ (Database)assertAndExtractSingleObjectFromEvent(listSize, authCalls,
+ DummyHiveMetastoreAuthorizationProvider.AuthCallContextType.DB);
+
+ validateDropDb(db, dbFromDropDatabaseEvent);
+ }
+
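+ /**
+  * Asserts that the authorization call list has grown to the expected size
+  * and that the latest call carries exactly one object of the expected
+  * context type, then returns that object for validation.
+  */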
+ public Object assertAndExtractSingleObjectFromEvent(int listSize,
+ List<AuthCallContext> authCalls,
+ DummyHiveMetastoreAuthorizationProvider.AuthCallContextType callType) {
+ assertEquals(listSize, authCalls.size());
+ assertEquals(1,authCalls.get(listSize-1).authObjects.size());
+
+ assertEquals(callType,authCalls.get(listSize-1).type);
+ return (authCalls.get(listSize-1).authObjects.get(0));
+ }
+
+}
Index: ql/src/test/org/apache/hadoop/hive/ql/security/TestDefaultHiveMetastoreAuthorizationProvider.java
===================================================================
--- ql/src/test/org/apache/hadoop/hive/ql/security/TestDefaultHiveMetastoreAuthorizationProvider.java (revision 0)
+++ ql/src/test/org/apache/hadoop/hive/ql/security/TestDefaultHiveMetastoreAuthorizationProvider.java (revision 0)
@@ -0,0 +1,226 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.security;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.hive.cli.CliSessionState;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener;
+import org.apache.hadoop.hive.ql.security.authorization.DefaultHiveMetastoreAuthorizationProvider;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.security.UserGroupInformation;
+
+/**
+ * TestDefaultHiveMetastoreAuthorizationProvider. Test case for the
+ * DefaultHiveMetastoreAuthorizationProvider, using
+ * {@link org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener}.
+ *
+ * Note that while we do use the hive driver to test, that is mostly for ease
+ * of test writing; it has the same effect as using a metastore client
+ * directly, because we disable hive client-side authorization for this test
+ * and only turn on server-side authorization.
+ */
+public class TestDefaultHiveMetastoreAuthorizationProvider extends TestCase {
+ private HiveConf clientHiveConf;
+ private HiveMetaStoreClient msc;
+ private Driver driver;
+ private UserGroupInformation ugi;
+
+ @Override
+ protected void setUp() throws Exception {
+
+ super.setUp();
+
+ int port = MetaStoreUtils.findFreePort();
+
+ System.setProperty(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname,
+ AuthorizationPreEventListener.class.getName());
+ System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_MANAGER.varname,
+ DefaultHiveMetastoreAuthorizationProvider.class.getName());
+ System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHENTICATOR_MANAGER.varname,
+ InjectableDummyAuthenticator.class.getName());
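+ // Clear the automatic table-owner grants so that creating a table
+ // requires an explicit grant, which is what this test exercises.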
+ System.setProperty(HiveConf.ConfVars.HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS.varname, "");
+
+
+ MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
+
+ clientHiveConf = new HiveConf(this.getClass());
+
+ clientHiveConf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED,false);
+
+ clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
+ clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
+ clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+
+ clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+ clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+
+ ugi = ShimLoader.getHadoopShims().getUGIForConf(clientHiveConf);
+
+ SessionState.start(new CliSessionState(clientHiveConf));
+ msc = new HiveMetaStoreClient(clientHiveConf, null);
+ driver = new Driver(clientHiveConf);
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ super.tearDown();
+ }
+
+ private void validateCreateDb(Database expectedDb, String dbName) {
+ assertEquals(expectedDb.getName(), dbName);
+ }
+
+ private void validateCreateTable(Table expectedTable, String tblName, String dbName) {
+ assertNotNull(expectedTable);
+ assertEquals(expectedTable.getTableName(),tblName);
+ assertEquals(expectedTable.getDbName(),dbName);
+ }
+
+ public void testSimplePrivileges() throws Exception {
+ String dbName = "smpdb";
+ String tblName = "smptbl";
+
+ String userName = ugi.getUserName();
+
+ CommandProcessorResponse ret = driver.run("create database " + dbName);
+ assertEquals(0,ret.getResponseCode());
+ Database db = msc.getDatabase(dbName);
+
+ validateCreateDb(db,dbName);
+
+ driver.run("use " + dbName);
+ ret = driver.run(
+ String.format("create table %s (a string) partitioned by (b string)", tblName));
+
+ assertEquals(1,ret.getResponseCode());
+ // failure from not having permissions to create table
+
+ ArrayList<FieldSchema> fields = new ArrayList<FieldSchema>(2);
+ fields.add(new FieldSchema("a", serdeConstants.STRING_TYPE_NAME, ""));
+
+ Table ttbl = new Table();
+ ttbl.setDbName(dbName);
+ ttbl.setTableName(tblName);
+ StorageDescriptor sd = new StorageDescriptor();
+ ttbl.setSd(sd);
+ sd.setCols(fields);
+ sd.setParameters(new HashMap<String, String>());
+ sd.getParameters().put("test_param_1", "Use this for comments etc");
+ sd.setSerdeInfo(new SerDeInfo());
+ sd.getSerdeInfo().setName(ttbl.getTableName());
+ sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+ sd.getSerdeInfo().getParameters().put(
+ org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "1");
+ sd.getSerdeInfo().setSerializationLib(
+ org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
+ ttbl.setPartitionKeys(new ArrayList<FieldSchema>());
+
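+ // A direct metastore client call should be rejected the same way: the
+ // server-side provider throws a MetaException carrying a "No privilege"
+ // message instead of creating the table.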
+ MetaException me = null;
+ try {
+ msc.createTable(ttbl);
+ } catch (MetaException e){
+ me = e;
+ }
+ assertNotNull(me);
+ assertTrue(me.getMessage().indexOf("No privilege") != -1);
+
+ driver.run("grant create on database "+dbName+" to user "+userName);
+
+ driver.run("use " + dbName);
+ ret = driver.run(
+ String.format("create table %s (a string) partitioned by (b string)", tblName));
+
+ assertEquals(0,ret.getResponseCode()); // now it succeeds.
+ Table tbl = msc.getTable(dbName, tblName);
+
+ validateCreateTable(tbl,tblName, dbName);
+
+ String fakeUser = "mal";
+ List<String> fakeGroupNames = new ArrayList<String>();
+ fakeGroupNames.add("groupygroup");
+
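+ // Switch the identity seen by the metastore-side authenticator to an
+ // unprivileged fake user; subsequent operations should be denied.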
+ InjectableDummyAuthenticator.injectUserName(fakeUser);
+ InjectableDummyAuthenticator.injectGroupNames(fakeGroupNames);
+ InjectableDummyAuthenticator.injectMode(true);
+
+ ret = driver.run(
+ String.format("create table %s (a string) partitioned by (b string)", tblName+"mal"));
+
+ assertEquals(1,ret.getResponseCode());
+
+ ttbl.setTableName(tblName+"mal");
+ me = null;
+ try {
+ msc.createTable(ttbl);
+ } catch (MetaException e){
+ me = e;
+ }
+ assertNotNull(me);
+ assertTrue(me.getMessage().indexOf("No privilege") != -1);
+
+ ret = driver.run("alter table "+tblName+" add partition (b='2011')");
+ assertEquals(1,ret.getResponseCode());
+
+ List<String> ptnVals = new ArrayList<String>();
+ ptnVals.add("b=2011");
+ Partition tpart = new Partition();
+ tpart.setDbName(dbName);
+ tpart.setTableName(tblName);
+ tpart.setValues(ptnVals);
+ tpart.setParameters(new HashMap<String, String>());
+ tpart.setSd(tbl.getSd().deepCopy());
+ tpart.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo().deepCopy());
+ tpart.getSd().setLocation(tbl.getSd().getLocation() + "/tpart");
+
+ me = null;
+ try {
+ msc.add_partition(tpart);
+ } catch (MetaException e){
+ me = e;
+ }
+ assertNotNull(me);
+ assertTrue(me.getMessage().indexOf("No privilege") != -1);
+
+ InjectableDummyAuthenticator.injectMode(false);
+
+ ret = driver.run("alter table "+tblName+" add partition (b='2011')");
+ assertEquals(0,ret.getResponseCode());
+
+ }
+
+}
Index: ql/src/test/org/apache/hadoop/hive/ql/security/DummyHiveMetastoreAuthorizationProvider.java
===================================================================
--- ql/src/test/org/apache/hadoop/hive/ql/security/DummyHiveMetastoreAuthorizationProvider.java (revision 0)
+++ ql/src/test/org/apache/hadoop/hive/ql/security/DummyHiveMetastoreAuthorizationProvider.java (revision 0)
@@ -0,0 +1,204 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.security;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.ql.metadata.AuthorizationException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider;
+import org.apache.hadoop.hive.ql.security.authorization.Privilege;
+
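+/**
+ * A mock HiveMetastoreAuthorizationProvider that records every authorization
+ * request it receives as an AuthCallContext in a static list, so that tests
+ * can assert exactly which objects the metastore asked to authorize.
+ */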
+public class DummyHiveMetastoreAuthorizationProvider implements HiveMetastoreAuthorizationProvider {
+
+
+ protected HiveAuthenticationProvider authenticator;
+
+ public enum AuthCallContextType {
+ USER,
+ DB,
+ TABLE,
+ PARTITION,
+ TABLE_AND_PARTITION
+ };
+
+ class AuthCallContext {
+
+ public AuthCallContextType type;
+ public List