diff --git a/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java b/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java index 9d98f50..762852f 100644 --- a/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java +++ b/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java @@ -232,15 +232,10 @@ final class CreateTableHook extends AbstractSemanticAnalyzerHook { // first check if we will allow the user to create table. String storageHandler = desc.getStorageHandler(); if (StringUtils.isEmpty(storageHandler)) { - - authorize(context, desc.getLocation()); tblProps.put(HCatConstants.HCAT_ISD_CLASS, inStorageDriver); tblProps.put(HCatConstants.HCAT_OSD_CLASS, outStorageDriver); } else { - // Create instance of HCatStorageHandler and obtain the - // HiveAuthorizationprovider for the handler and use it - // to authorize. try { HCatStorageHandler storageHandlerInst = HCatUtil .getStorageHandler(context.getConf(), storageHandler); @@ -249,7 +244,6 @@ final class CreateTableHook extends AbstractSemanticAnalyzerHook { // TBD: To pass in the exact read and write privileges. String databaseName = context.getHive().newTable(desc.getTableName()).getDbName(); - auth.authorize(context.getHive().getDatabase(databaseName), null, null); tblProps.put(HCatConstants.HCAT_ISD_CLASS, storageHandlerInst .getInputStorageDriver().getName()); @@ -280,32 +274,4 @@ final class CreateTableHook extends AbstractSemanticAnalyzerHook { desc.setTblProps(tblProps); context.getConf().set(HCatConstants.HCAT_CREATE_TBL_NAME, tableName); } - - private void authorize(HiveSemanticAnalyzerHookContext context, String loc) - throws SemanticException { - - Path tblDir; - Configuration conf = context.getConf(); - try { - Warehouse wh = new Warehouse(conf); - if (loc == null || loc.isEmpty()) { - Hive hive = context.getHive(); - tblDir = wh.getTablePath( - hive.getDatabase(hive.getCurrentDatabase()), tableName) - .getParent(); - } else { - tblDir = wh.getDnsPath(new Path(loc)); - } - - try { - AuthUtils.authorize(tblDir, FsAction.WRITE, conf); - } catch (HCatException e) { - throw new SemanticException(e); - } - } catch (MetaException e) { - throw new SemanticException(e); - } catch (HiveException e) { - throw new SemanticException(e); - } - } } diff --git a/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java b/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java index 8387d8e..d974907 100644 --- a/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java +++ b/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java @@ -19,29 +19,52 @@ package org.apache.hcatalog.cli.SemanticAnalysis; import java.io.Serializable; import java.util.List; +import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.derby.tools.sysinfo; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.metadata.AuthorizationException; +import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import 
org.apache.hadoop.hive.ql.metadata.InvalidTableException; +import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.ASTNode; import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook; +import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.HiveParser; import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext; import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc; +import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; +import org.apache.hadoop.hive.ql.plan.CreateTableDesc; +import org.apache.hadoop.hive.ql.plan.DDLWork; +import org.apache.hadoop.hive.ql.plan.DescDatabaseDesc; +import org.apache.hadoop.hive.ql.plan.DescTableDesc; +import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc; +import org.apache.hadoop.hive.ql.plan.DropTableDesc; +import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; +import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc; +import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; +import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc; +import org.apache.hadoop.hive.ql.security.authorization.Privilege; +import org.apache.hadoop.util.StringUtils; import org.apache.hcatalog.common.AuthUtils; import org.apache.hcatalog.common.ErrorType; import org.apache.hcatalog.common.HCatException; -public class HCatSemanticAnalyzer extends AbstractSemanticAnalyzerHook { +public class HCatSemanticAnalyzer extends HCatSemanticAnalyzerBase { private AbstractSemanticAnalyzerHook hook; private ASTNode ast; @@ -116,13 +139,7 @@ public class HCatSemanticAnalyzer extends AbstractSemanticAnalyzerHook { switch (ast.getToken().getType()) { case HiveParser.TOK_DESCTABLE: - authorize(getFullyQualifiedName((ASTNode) ast.getChild(0).getChild(0)), context, FsAction.READ, false); - break; - case HiveParser.TOK_SHOWPARTITIONS: - authorize(BaseSemanticAnalyzer.getUnescapedName((ASTNode)ast.getChild(0)), context, FsAction.READ, false); - break; - case HiveParser.TOK_ALTERTABLE_ADDPARTS: case HiveParser.TOK_DROPTABLE: case HiveParser.TOK_ALTERTABLE_ADDCOLS: @@ -131,105 +148,280 @@ public class HCatSemanticAnalyzer extends AbstractSemanticAnalyzerHook { case HiveParser.TOK_ALTERTABLE_PROPERTIES: case HiveParser.TOK_ALTERTABLE_SERIALIZER: case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES: - authorize(BaseSemanticAnalyzer.getUnescapedName((ASTNode)ast.getChild(0)), context, FsAction.WRITE, false); - break; - case HiveParser.TOK_ALTERTABLE_PARTITION: - authorize(BaseSemanticAnalyzer.unescapeIdentifier(((ASTNode)ast.getChild(0)).getChild(0).getText()), context, FsAction.WRITE, false); - break; - case HiveParser.TOK_DESCDATABASE: - case HiveParser.TOK_SWITCHDATABASE: - authorize(BaseSemanticAnalyzer.getUnescapedName((ASTNode)ast.getChild(0)), context, FsAction.READ, true); - break; - + case HiveParser.TOK_SWITCHDATABASE: case HiveParser.TOK_DROPDATABASE: - authorize(BaseSemanticAnalyzer.getUnescapedName((ASTNode)ast.getChild(0)), context, FsAction.WRITE, true); - break; - case HiveParser.TOK_CREATEDATABASE: case HiveParser.TOK_SHOWDATABASES: case HiveParser.TOK_SHOW_TABLESTATUS: case HiveParser.TOK_SHOWTABLES: - // We do no checks for show tables/db , create db. Its always allowed. 
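// Review note: the per-token authorize() calls removed in this switch are superseded by the
// descriptor-based checks in authorizeDDL(context, rootTasks), invoked once after the switch.
// A minimal sketch of that pattern, using only names introduced later in this patch (generic
// type parameters restored, since the diff extraction stripped them):
//
//   for (Task<? extends Serializable> task : rootTasks) {
//     if (task.getWork() instanceof DDLWork) {
//       DDLWork work = (DDLWork) task.getWork();
//       DropTableDesc dropTable = work.getDropTblDesc();
//       if (dropTable != null && dropTable.getPartSpecs() == null) {
//         authorizeTable(context.getHive(), dropTable.getTableName(), Privilege.DROP);
//       }
//     }
//   }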
- - case HiveParser.TOK_CREATETABLE: - // No checks for Create Table, since its not possible to compute location - // here easily. So, it is especially handled in CreateTable post hook. + case HiveParser.TOK_CREATETABLE: break; case HiveParser.TOK_EXPORT: String tableName = BaseSemanticAnalyzer.getUnescapedName(((ASTNode) ast.getChild(0).getChild(0))); LOG.debug("Export for table " + tableName); - authorize(tableName, context, FsAction.READ, false); + //authorize(tableName, context, FsAction.READ, false); break; case HiveParser.TOK_IMPORT: LOG.debug("Import into location " + context.getConf().get("import.destination.dir")); - AuthUtils.authorize(new Path(context.getConf().get("import.destination.dir")), - FsAction.WRITE, context.getConf()); + //AuthUtils.authorize(new Path(context.getConf().get("import.destination.dir")), + // FsAction.WRITE, context.getConf()); break; default: throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, "Unexpected token: "+ast.getToken()); } + + authorizeDDL(context, rootTasks); + } catch(HCatException e){ throw new SemanticException(e); - } catch (MetaException e) { - throw new SemanticException(e); } catch (HiveException e) { throw new SemanticException(e); - } + } if(hook != null){ hook.postAnalyze(context, rootTasks); } } - private void authorize(String name, HiveSemanticAnalyzerHookContext cntxt, FsAction action, boolean isDBOp) - throws MetaException, HiveException, HCatException{ - - - Warehouse wh = new Warehouse(cntxt.getConf()); - if(!isDBOp){ - // Do validations for table path. - Table tbl; - try{ - tbl = cntxt.getHive().getTable(name); + protected void authorizeDDL(HiveSemanticAnalyzerHookContext cntxt, + List> rootTasks) throws HiveException { + + Hive hive = cntxt.getHive(); + + for (Task task : rootTasks) { + if (task.getWork() instanceof DDLWork) { + DDLWork work = (DDLWork)task.getWork(); + + // DB opereations + + CreateDatabaseDesc createDb = work.getCreateDatabaseDesc(); + if (createDb != null) { + Database db = new Database(createDb.getName(), createDb.getComment(), + createDb.getLocationUri(), createDb.getDatabaseProperties()); + authorize(db, Privilege.CREATE); + } + + DropDatabaseDesc dropDb = work.getDropDatabaseDesc(); + if (dropDb != null) { + Database db = cntxt.getHive().getDatabase(dropDb.getDatabaseName()); + authorize(db, Privilege.DROP); + } + + DescDatabaseDesc descDb = work.getDescDatabaseDesc(); + if (descDb != null) { + Database db = cntxt.getHive().getDatabase(descDb.getDatabaseName()); + authorize(db, Privilege.SELECT); + } + + SwitchDatabaseDesc switchDb = work.getSwitchDatabaseDesc(); + if (switchDb != null) { + Database db = cntxt.getHive().getDatabase(switchDb.getDatabaseName()); + authorize(db, Privilege.SELECT); + } + + ShowTablesDesc showTables = work.getShowTblsDesc(); + if (showTables != null) { + String dbName = showTables.getDbName() == null ? + cntxt.getHive().getCurrentDatabase(): + showTables.getDbName(); + authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT); + } + + ShowTableStatusDesc showTableStatus = work.getShowTblStatusDesc(); + if (showTableStatus != null) { + String dbName = showTableStatus.getDbName() == null ? + cntxt.getHive().getCurrentDatabase(): + showTableStatus.getDbName(); + authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT); + } + + //TODO: add alter database support in HCat + + //Table operations. 
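// Review note: each authorize(<object>, Privilege.X) call in this method resolves, through the
// HiveAuthorizationProvider configured for the session, to an HDFS permission check in
// HdfsAuthorizationProvider (added later in this patch). Roughly, for a database:
//
//   Path dbPath = getDbLocation(db);                          // <warehouse>/<dbname>.db, or the custom location
//   EnumSet<FsAction> actions = getFsActions(privs, dbPath);  // e.g. SELECT -> READ, CREATE/DROP -> WRITE
//   AuthUtils.authorize(dbPath, actions, getConf());          // owner/group/other check on the directory
//
// Tables and partitions go through the same path-based check against their storage descriptor location.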
+ CreateTableDesc createTable = work.getCreateTblDesc(); + if (createTable != null) { + Table table = cntxt.getHive().newTable(createTable.getTableName()); + if (createTable.getLocation() != null) { + table.setDataLocation(new Path(createTable.getLocation()).toUri()); + } + //authorize first for the db operation + authorize(cntxt.getHive().getDatabase(table.getDbName()), Privilege.CREATE); + + //then authorize against the table operation so that location permissions can be checked + authorize(table, Privilege.CREATE); + } + + DropTableDesc dropTable = work.getDropTblDesc(); + if (dropTable != null) { + if (dropTable.getPartSpecs() == null) { + authorizeTable(cntxt.getHive(), dropTable.getTableName(), Privilege.DROP); + } else { + for (Map partSpec : dropTable.getPartSpecs()) { + Table table = hive.getTable(hive.getCurrentDatabase(), dropTable.getTableName()); + List partitions = hive.getPartitions(table, partSpec); + for (Partition part : partitions) { + authorize(part, Privilege.DROP); + } + } + } + } + + AlterTableDesc alterTable = work.getAlterTblDesc(); + if (alterTable != null) { + Table table = hive.getTable(hive.getCurrentDatabase(), alterTable.getOldName(), false); + + Partition part = null; + if(alterTable.getPartSpec() != null) { + part = hive.getPartition(table, alterTable.getPartSpec(), false); + } + + String newLocation = alterTable.getNewLocation(); + if (alterTable.getOp() == AlterTableDesc.AlterTableTypes.RENAME) { + authorize(table, Privilege.ALTER_DATA); + } else if (alterTable.getOp() == AlterTableDesc.AlterTableTypes.ALTERLOCATION) { + if (part != null) { + authorize(part, Privilege.ALTER_DATA); //authorize for the old location, and new location + part.setLocation(newLocation); + authorize(part, Privilege.ALTER_DATA); + } else { + authorize(table, Privilege.ALTER_DATA); //authorize for the old location, and new location + table.getTTable().getSd().setLocation(newLocation); + authorize(table, Privilege.ALTER_DATA); + } + } else { + authorize(table, Privilege.ALTER_METADATA); + } + + + } + + AddPartitionDesc addPartitionDesc = work.getAddPartitionDesc(); + if (addPartitionDesc != null) { + String dbName = getDbName(hive, addPartitionDesc.getDbName()); + Table table = hive.getTable(dbName, addPartitionDesc.getTableName()); + Path partPath = null; + if (addPartitionDesc.getLocation() != null) { + partPath = new Path(table.getPath(), addPartitionDesc.getLocation()); + } + + Partition part = newPartition( + table, addPartitionDesc.getPartSpec(), partPath, + addPartitionDesc.getPartParams(), + addPartitionDesc.getInputFormat(), + addPartitionDesc.getOutputFormat(), + addPartitionDesc.getNumBuckets(), + addPartitionDesc.getCols(), + addPartitionDesc.getSerializationLib(), + addPartitionDesc.getSerdeParams(), + addPartitionDesc.getBucketCols(), + addPartitionDesc.getSortCols()); + + authorize(part, Privilege.CREATE); + } + + DescTableDesc descTable = work.getDescTblDesc(); + if (descTable != null) { + authorizeTable(cntxt.getHive(), descTable.getTableName(), Privilege.SELECT); + } + + ShowPartitionsDesc showParts = work.getShowPartsDesc(); + if (showParts != null) { + authorizeTable(cntxt.getHive(), showParts.getTabName(), Privilege.SELECT); + } } - catch(InvalidTableException ite){ - // Table itself doesn't exist in metastore, nothing to validate. 
- return; + } + } + + protected Partition newPartition(Table tbl, Map partSpec, + Path location, Map partParams, String inputFormat, String outputFormat, + int numBuckets, List cols, + String serializationLib, Map serdeParams, + List bucketCols, List sortCols) throws HiveException { + + try { + Partition tmpPart = new Partition(tbl, partSpec, location); + org.apache.hadoop.hive.metastore.api.Partition inPart + = tmpPart.getTPartition(); + if (partParams != null) { + inPart.setParameters(partParams); } - Path path = tbl.getPath(); - if(path != null){ - AuthUtils.authorize(wh.getDnsPath(path), action, cntxt.getConf()); - } else{ - // This will happen, if table exists in metastore for a given - // tablename, but has no path associated with it, so there is nothing to check. - // In such cases, do no checks and allow whatever hive behavior is for it. - return; + if (inputFormat != null) { + inPart.getSd().setInputFormat(inputFormat); } - } else{ - // Else, its a DB operation. - Database db = cntxt.getHive().getDatabase(name); - if(null == db){ - // Database doesn't exist, nothing to authorize - return; - } - AuthUtils.authorize(wh.getDatabasePath(db), action, cntxt.getConf()); + if (outputFormat != null) { + inPart.getSd().setOutputFormat(outputFormat); + } + if (numBuckets != -1) { + inPart.getSd().setNumBuckets(numBuckets); + } + if (cols != null) { + inPart.getSd().setCols(cols); + } + if (serializationLib != null) { + inPart.getSd().getSerdeInfo().setSerializationLib(serializationLib); + } + if (serdeParams != null) { + inPart.getSd().getSerdeInfo().setParameters(serdeParams); + } + if (bucketCols != null) { + inPart.getSd().setBucketCols(bucketCols); + } + if (sortCols != null) { + inPart.getSd().setSortCols(sortCols); + } + + return new Partition(tbl, inPart); + } catch (Exception e) { + LOG.error(StringUtils.stringifyException(e)); + throw new HiveException(e); } } - - - private String getFullyQualifiedName(ASTNode ast) { - // Copied verbatim from DDLSemanticAnalyzer, since its private there. - if (ast.getChildCount() == 0) { - return ast.getText(); + + protected String getDbName(Hive hive, String dbName) { + return dbName == null ? hive.getCurrentDatabase() : dbName; + } + + protected void authorize(Database db, Privilege priv) throws AuthorizationException, SemanticException { + try { + getAuthProvider().authorize(db, new Privilege[] {priv}, null); + } catch (HiveException ex) { + throw new SemanticException(ex); + } + } + + protected void authorizeTable(Hive hive, String tableName, Privilege priv) throws AuthorizationException, HiveException { + Table table; + try{ + table = hive.getTable(tableName); + } + catch(InvalidTableException ite){ + // Table itself doesn't exist in metastore, nothing to validate. + return; + } + + authorize(table, priv); + } + + protected void authorize(Table table, Privilege priv) throws AuthorizationException, SemanticException { + try { + getAuthProvider().authorize(table, new Privilege[] {priv}, null); + } catch (HiveException ex) { + throw new SemanticException(ex); + } + } + + protected void authorize(Partition part, Privilege priv) throws AuthorizationException, SemanticException { + try { + getAuthProvider().authorize(part, new Privilege[] {priv}, null); + } catch (HiveException ex) { + throw new SemanticException(ex); } - - return getFullyQualifiedName((ASTNode) ast.getChild(0)) + "." 
- + getFullyQualifiedName((ASTNode) ast.getChild(1)); } + } diff --git a/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java b/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java new file mode 100644 index 0000000..7698b9d --- /dev/null +++ b/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hcatalog.cli.SemanticAnalysis; + +import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook; +import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider; +import org.apache.hadoop.hive.ql.session.SessionState; + +public class HCatSemanticAnalyzerBase extends AbstractSemanticAnalyzerHook { + + private HiveAuthorizationProvider authProvider; + + public HiveAuthorizationProvider getAuthProvider() { + if (authProvider == null) { + authProvider = SessionState.get().getAuthorizer(); + } + + return authProvider; + } + +} diff --git a/src/java/org/apache/hcatalog/common/AuthUtils.java b/src/java/org/apache/hcatalog/common/AuthUtils.java index 7cba8dc..791fec9 100644 --- a/src/java/org/apache/hcatalog/common/AuthUtils.java +++ b/src/java/org/apache/hcatalog/common/AuthUtils.java @@ -19,12 +19,14 @@ package org.apache.hcatalog.common; import java.io.FileNotFoundException; import java.io.IOException; +import java.util.EnumSet; import javax.security.auth.login.LoginException; import org.apache.commons.lang.ArrayUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; @@ -37,40 +39,63 @@ import org.apache.hadoop.security.UserGroupInformation; public class AuthUtils { - /** - * @param path non-null - * @param action non-null - * @param conf - * @throws SemanticException - * @throws HCatException - * - * This method validates only for existing path. If path doesn't exist - * there is nothing to validate. So, make sure that path passed in is non-null. 
- */ - @SuppressWarnings("deprecation") - public static void authorize(final Path path, final FsAction action, final Configuration conf) throws SemanticException, HCatException{ - - if(path == null) { - throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION); - } + public static void checkAuthorized(final FileSystem fs, final Path path, + final EnumSet actions, final Configuration conf, String user, String[] groups) + throws IOException, AccessControlException { + final FileStatus stat; try { - stat = path.getFileSystem(conf).getFileStatus(path); - } catch (FileNotFoundException fnfe){ + stat = fs.getFileStatus(path); + } catch (FileNotFoundException fnfe) { // File named by path doesn't exist; nothing to validate. return; - } - catch (AccessControlException ace) { - throw new HCatException(ErrorType.ERROR_ACCESS_CONTROL, "for path " + path, ace); - } catch (org.apache.hadoop.fs.permission.AccessControlException ace){ + } catch (org.apache.hadoop.fs.permission.AccessControlException ace) { // Older hadoop version will throw this @deprecated Exception. - throw new HCatException(ErrorType.ERROR_ACCESS_CONTROL, "for path " + path, ace); - } catch (IOException ioe){ - throw new SemanticException(ioe); + throw new AccessControlException(ace.getMessage()); } + final FsPermission dirPerms = stat.getPermission(); + final String grp = stat.getGroup(); + + for (FsAction action : actions) { + if (user.equals(stat.getOwner())) { + if (dirPerms.getUserAction().implies(action)) { + continue; + } + } + if (ArrayUtils.contains(groups, grp)) { + if (dirPerms.getGroupAction().implies(action)) { + continue; + } + } + if (dirPerms.getOtherAction().implies(action)) { + continue; + } + throw new AccessControlException("action " + action + " not permitted on path " + + path + " for user " + user); + } + } + + /** + * This method validates only for existing path. If path doesn't exist there + * is nothing to validate. So, make sure that path passed in is non-null. + * @param path + * non-null + * @param actions + * non-null + * @param conf + * @throws SemanticException + * @throws HCatException + */ + public static void authorize(final Path path, final EnumSet actions, final Configuration conf) + throws SemanticException, HCatException { + + if (path == null) { + throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION); + } + final UserGroupInformation ugi; try { ugi = ShimLoader.getHadoopShims().getUGIForConf(conf); @@ -79,29 +104,48 @@ public class AuthUtils { } catch (IOException ioe) { throw new SemanticException(ioe); } - - final FsPermission dirPerms = stat.getPermission(); - - final String user = HiveConf.getBoolVar(conf, ConfVars.METASTORE_USE_THRIFT_SASL) ? - ugi.getShortUserName() : ugi.getUserName(); - final String grp = stat.getGroup(); - if(user.equals(stat.getOwner())){ - if(dirPerms.getUserAction().implies(action)){ - return; - } - throw new HCatException(ErrorType.ERROR_ACCESS_CONTROL, "action " + action + " not permitted on path " + path + " for user " + user); - } - if(ArrayUtils.contains(ugi.getGroupNames(), grp)){ - if(dirPerms.getGroupAction().implies(action)){ - return; + + final String user = HiveConf.getBoolVar(conf, ConfVars.METASTORE_USE_THRIFT_SASL) ? 
ugi + .getShortUserName() : ugi.getUserName(); + + try { + final FileSystem fs = path.getFileSystem(conf); + + if (fs.exists(path)) { + checkAuthorized(fs, path, actions, conf, user, ugi.getGroupNames()); + } else if (path.getParent() != null) { + //find the ancestor which exists to check it's permissions + Path par = path.getParent(); + while (par != null) { + if (fs.exists(par)) { + break; + } + par = par.getParent(); + } + + checkAuthorized(fs, par, actions, conf, user, ugi.getGroupNames()); } - throw new HCatException(ErrorType.ERROR_ACCESS_CONTROL, "action " + action + " not permitted on path " + path + " for group " + grp); - } - if(dirPerms.getOtherAction().implies(action)){ - return; + + } catch (AccessControlException ex) { + throw new HCatException(ErrorType.ERROR_ACCESS_CONTROL, ex.getMessage()); + } catch (IOException ex) { + throw new SemanticException(ex); } - throw new HCatException(ErrorType.ERROR_ACCESS_CONTROL, "action " + action + " not permitted on path " + path + " for others"); - + } + /** + * This method validates only for existing path. If path doesn't exist there + * is nothing to validate. So, make sure that path passed in is non-null. + * @param path + * non-null + * @param action + * non-null + * @param conf + * @throws SemanticException + * @throws HCatException + */ + public static void authorize(final Path path, final FsAction action, final Configuration conf) + throws SemanticException, HCatException { + authorize(path, EnumSet.of(action), conf); } -} +} \ No newline at end of file diff --git a/src/java/org/apache/hcatalog/security/DelegationAuthorizationProvider.java b/src/java/org/apache/hcatalog/security/DelegationAuthorizationProvider.java new file mode 100644 index 0000000..1777ebf --- /dev/null +++ b/src/java/org/apache/hcatalog/security/DelegationAuthorizationProvider.java @@ -0,0 +1,136 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hcatalog.security; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.ql.metadata.AuthorizationException; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider; +import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider; +import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProviderBase; +import org.apache.hadoop.hive.ql.security.authorization.Privilege; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hcatalog.storagehandler.HCatStorageHandler; + +/** + * A HiveAuthorizationProvider which delegates the authorization requests to + * the underlying AuthorizationProviders obtained from the StorageHandler. + */ +public class DelegationAuthorizationProvider extends HiveAuthorizationProviderBase { + + protected HiveAuthorizationProvider hdfsAuthorizer = new HdfsAuthorizationProvider(); + + protected static Map<String, String> authProviders = new HashMap<String, String>(); + + @Override + public void setConf(Configuration conf) { + super.setConf(conf); + hdfsAuthorizer.setConf(conf); + } + + @Override + public void setAuthenticator(HiveAuthenticationProvider authenticator) { + super.setAuthenticator(authenticator); + hdfsAuthorizer.setAuthenticator(authenticator); + } + + static { + registerAuthProvider("org.apache.hadoop.hive.hbase.HBaseStorageHandler", + "org.apache.hcatalog.hbase.HBaseAuthorizationProvider"); + registerAuthProvider("org.apache.hcatalog.hbase.HBaseHCatStorageHandler", + "org.apache.hcatalog.hbase.HBaseAuthorizationProvider"); + } + + //workaround until Hive adds StorageHandler.getAuthorizationProvider(). 
Remove these parts afterwards + public static void registerAuthProvider(String storageHandlerClass, + String authProviderClass) { + authProviders.put(storageHandlerClass, authProviderClass); + } + + /** Returns the StorageHandler of the Table obtained from the HCatStorageHandler */ + protected HiveAuthorizationProvider getDelegate(Table table) throws HiveException { + HiveStorageHandler handler = table.getStorageHandler(); + + if (handler != null) { + if (handler instanceof HCatStorageHandler) { + return ((HCatStorageHandler) handler).getAuthorizationProvider(); + } else { + String authProviderClass = authProviders.get(handler.getClass().getCanonicalName()); + + if (authProviderClass != null) { + try { + //instantiate and return the provider registered for this handler class + return (HiveAuthorizationProvider) ReflectionUtils.newInstance( + getConf().getClassByName(authProviderClass), getConf()); + } catch (ClassNotFoundException ex) { + throw new HiveException("Cannot instantiate delegation AuthorizationProvider"); + } + } + + //else we do not have anything to delegate to + throw new HiveException(String.format("Storage Handler for table:%s is not an instance " + + "of HCatStorageHandler", table.getTableName())); + } + } else { + //return an authorizer for HDFS + return hdfsAuthorizer; + } + } + + @Override + public void authorize(Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) + throws HiveException, AuthorizationException { + //global authorizations against warehouse hdfs directory + hdfsAuthorizer.authorize(readRequiredPriv, writeRequiredPriv); + } + + @Override + public void authorize(Database db, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) + throws HiveException, AuthorizationException { + //db's are tied to a hdfs location + hdfsAuthorizer.authorize(db, readRequiredPriv, writeRequiredPriv); + } + + @Override + public void authorize(Table table, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) + throws HiveException, AuthorizationException { + getDelegate(table).authorize(table, readRequiredPriv, writeRequiredPriv); + } + + @Override + public void authorize(Partition part, Privilege[] readRequiredPriv, + Privilege[] writeRequiredPriv) throws HiveException, AuthorizationException { + getDelegate(part.getTable()).authorize(part, readRequiredPriv, writeRequiredPriv); + } + + @Override + public void authorize(Table table, Partition part, List<String> columns, + Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws HiveException, + AuthorizationException { + getDelegate(table).authorize(part, readRequiredPriv, writeRequiredPriv); + } +} diff --git a/src/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java b/src/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java new file mode 100644 index 0000000..6caa4d8 --- /dev/null +++ b/src/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java @@ -0,0 +1,190 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hcatalog.security; + +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; + +import java.net.URI; +import java.util.EnumSet; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.ql.metadata.AuthorizationException; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProviderBase; +import org.apache.hadoop.hive.ql.security.authorization.Privilege; +import org.apache.hcatalog.common.AuthUtils; +import org.apache.hcatalog.common.HCatException; + +/** + * An AuthorizationProvider, which checks against the data access level permissions on HDFS. + */ +public class HdfsAuthorizationProvider extends HiveAuthorizationProviderBase { + + protected Warehouse wh; + + public HdfsAuthorizationProvider() { + super(); + } + + public HdfsAuthorizationProvider(Configuration conf) { + super(); + setConf(conf); + } + + @Override + public void setConf(Configuration conf) { + super.setConf(conf); + try { + this.wh = new Warehouse(conf); + } catch (MetaException ex) { + throw new RuntimeException(ex); + } + } + + protected FsAction getFsAction(Privilege priv, Path path) { + + switch (priv.getPriv()) { + case ALL : return FsAction.ALL; + case ALTER_DATA : return FsAction.WRITE; + case ALTER_METADATA : return FsAction.WRITE; + case CREATE : return FsAction.WRITE; + case DROP : return FsAction.WRITE; + case INDEX : return FsAction.WRITE; + case LOCK : return FsAction.WRITE; + case SELECT : return FsAction.READ; + case SHOW_DATABASE : return FsAction.READ; + case UNKNOWN : return FsAction.NONE; + default : throw new AuthorizationException("Unknown privilege"); + } + } + + protected EnumSet getFsActions(Privilege[] privs, Path path) { + EnumSet actions = EnumSet.noneOf(FsAction.class); + + if (privs == null) { + return actions; + } + + for (Privilege priv : privs) { + actions.add(getFsAction(priv, path)); + } + + return actions; + } + + private static final String DATABASE_WAREHOUSE_SUFFIX = ".db"; + + private Path getDefaultDatabasePath(String dbName) throws MetaException { + if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) { + return wh.getWhRoot(); + } + return new Path(wh.getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX); + } + + protected Path getDbLocation(Database db) throws HiveException { + try { + String location = db.getLocationUri(); + if (location == null) { + return getDefaultDatabasePath(db.getName()); + } else { + return wh.getDnsPath(wh.getDatabasePath(db)); + } + } catch (MetaException ex) { + throw new HiveException(ex.getMessage()); + } + } + + @Override + public void authorize(Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) + throws HiveException, AuthorizationException { + //Authorize for global level permissions at the warehouse dir + 
//getDefaultDatabasePath(dbName); + } + + @Override + public void authorize(Database db, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) + throws HiveException, AuthorizationException { + + Path path = getDbLocation(db); + + hdfsAuth(path, readRequiredPriv, writeRequiredPriv); + } + + @Override + public void authorize(Table table, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) + throws HiveException, AuthorizationException { + + //unlike Hive's model, this can be called at CREATE TABLE as well, since we should authorize + //against the table's declared location + Path path = null; + try { + if (table.getTTable().getSd().getLocation() == null + || table.getTTable().getSd().getLocation().isEmpty()) { + path = wh.getTablePath(hive_db.getDatabase(table.getDbName()), table.getTableName()); + } else { + path = table.getPath(); + } + } catch (MetaException ex) { + throw new HiveException(ex); + } + + hdfsAuth(path, readRequiredPriv, writeRequiredPriv); + } + + @Override + public void authorize(Partition part, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) + throws HiveException, AuthorizationException { + + if (part.getLocation() == null) { + authorize(part.getTable(), readRequiredPriv, writeRequiredPriv); + } else { + hdfsAuth(part.getPartitionPath(), readRequiredPriv, writeRequiredPriv); + } + } + + @Override + public void authorize(Table table, Partition part, List columns, + Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws HiveException, + AuthorizationException { + } + + protected void hdfsAuth(Path path, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws HiveException { + try { + EnumSet actions = getFsActions(readRequiredPriv, path); + actions.addAll(getFsActions(writeRequiredPriv, path)); + if (!actions.isEmpty()) { + AuthUtils.authorize(path, actions, getConf()); + } + } catch (HCatException ex) { + throw new HiveException(ex.getMessage()); + } + } + +} diff --git a/src/test/org/apache/hcatalog/security/TestHdfsAuthorizationProvider.java b/src/test/org/apache/hcatalog/security/TestHdfsAuthorizationProvider.java new file mode 100644 index 0000000..46926b2 --- /dev/null +++ b/src/test/org/apache/hcatalog/security/TestHdfsAuthorizationProvider.java @@ -0,0 +1,575 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hcatalog.security; + +import java.io.IOException; +import java.util.Random; + +import junit.framework.Assert; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hive.cli.CliSessionState; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.shims.ShimLoader; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hcatalog.cli.HCatDriver; +import org.apache.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer; +import org.junit.After; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; + + +public class TestHdfsAuthorizationProvider { + + HCatDriver hcatDriver; + HiveMetaStoreClient msc; + HiveConf conf; + String whDir; + Path whPath; + FileSystem whFs; + Warehouse wh; + Hive hive; + + static FsPermission perm007 = FsPermission.createImmutable((short)0007); //-------rwx + static FsPermission perm070 = FsPermission.createImmutable((short)0070); //----rwx--- + static FsPermission perm700 = FsPermission.createImmutable((short)0700); //-rwx------ + static FsPermission perm755 = FsPermission.createImmutable((short)0755); //-rwxr-xr-x + static FsPermission perm300 = FsPermission.createImmutable((short)0300); //--wx------ + static FsPermission perm500 = FsPermission.createImmutable((short)0500); //-r-x------ + static FsPermission perm555 = FsPermission.createImmutable((short)0555); //-r-xr-xr-x + + @Before + public void setUp() throws Exception { + + conf = new HiveConf(this.getClass()); + conf.set(ConfVars.PREEXECHOOKS.varname, ""); + conf.set(ConfVars.POSTEXECHOOKS.varname, ""); + conf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); + + conf.set("hive.metastore.local", "true"); + conf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); + conf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED, true); + conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, + DelegationAuthorizationProvider.class.getCanonicalName()); + + whDir = System.getProperty("test.warehouse.dir", "/tmp/testhdfsauthorization_wh"); + conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, whDir); + + UserGroupInformation ugi = ShimLoader.getHadoopShims().getUGIForConf(conf); + String username = ShimLoader.getHadoopShims().getShortUserName(ugi); + + whPath = new Path(whDir); + whFs = whPath.getFileSystem(conf); + + wh = new Warehouse(conf); + hive = Hive.get(conf); + + //clean up mess in HMS + cleanupHMS(); + + whFs.delete(whPath, true); + whFs.mkdirs(whPath, perm755); + + SessionState.start(new CliSessionState(conf)); + hcatDriver = new HCatDriver(); + } + + public void cleanupHMS() throws HiveException, MetaException, NoSuchObjectException { + for (String dbName : hive.getAllDatabases()) { + if (dbName.equals("default")) { + continue; + } + try { + whFs.setPermission(getDbPath(dbName), perm700); + } catch(IOException 
ex) { + //ignore + } + hive.dropDatabase(dbName, true, true, true); + } + + //clean tables in default db + for (String tablename : hive.getAllTables("default")) { + hive.dropTable("default", tablename, true, true); + } + } + + @After + public void tearDown() throws IOException { + whFs.close(); + hcatDriver.close(); + Hive.closeCurrent(); + } + + public Path getDbPath(String dbName) throws MetaException, HiveException { + return wh.getDatabasePath(hive.getDatabase(dbName)); + } + + public Path getTablePath(String dbName, String tableName) throws HiveException { + Table table = hive.getTable(dbName, tableName); + return table.getPath(); + } + + public Path getPartPath(String partName, String dbName, String tableName) throws HiveException { + return new Path(getTablePath(dbName, tableName), partName); + } + + /** + * Tests whether the warehouse directory is writable by the current user (as defined by Hadoop) + */ + @Test + public void testWarehouseIsWritable() throws Exception { + Path top = new Path(whPath, "_foobarbaz12_"); + try { + whFs.mkdirs(top); + } finally { + whFs.delete(top, true); + } + } + + @Test + public void testDatabaseOps() throws Exception { + run("SHOW TABLES"); + run("SHOW TABLE EXTENDED LIKE foo1"); + + whFs.setPermission(whPath, perm700); + run("CREATE DATABASE doo"); + run("DESCRIBE DATABASE doo"); + run("USE doo"); + run("SHOW TABLES"); + run("SHOW TABLE EXTENDED LIKE foo1"); + run("DROP DATABASE doo"); + + //custom location + Path dbPath = new Path(whPath, new Random().nextInt() + "/mydb"); + whFs.mkdirs(dbPath, perm700); + run("CREATE DATABASE doo2 LOCATION '%s'", dbPath.toUri()); + run("DESCRIBE DATABASE doo2", dbPath.toUri()); + run("USE doo2"); + run("SHOW TABLES"); + run("SHOW TABLE EXTENDED LIKE foo1"); + run("DROP DATABASE doo2", dbPath.toUri()); + + //custom non-existing location + run("CREATE DATABASE doo3 LOCATION '%s/subpath'", dbPath.toUri()); + } + + @Test + public void testCreateDatabaseFail1() throws Exception { + whFs.setPermission(whPath, perm500); + runFail("CREATE DATABASE doo"); //in the default location + + whFs.setPermission(whPath, perm555); + runFail("CREATE DATABASE doo2"); + } + + @Test + public void testCreateDatabaseFail2() throws Exception { + //custom location + Path dbPath = new Path(whPath, new Random().nextInt() + "/mydb"); + + whFs.mkdirs(dbPath, perm700); + whFs.setPermission(dbPath, perm500); + runFail("CREATE DATABASE doo2 LOCATION '%s'", dbPath.toUri()); + } + + @Test + public void testDropDatabaseFail1() throws Exception { + whFs.setPermission(whPath, perm700); + run("CREATE DATABASE doo"); //in the default location + + whFs.setPermission(getDbPath("doo"), perm500); //revoke write + runFail("DROP DATABASE doo"); + } + + @Test + public void testDropDatabaseFail2() throws Exception { + //custom location + Path dbPath = new Path(whPath, new Random().nextInt() + "/mydb"); + + whFs.mkdirs(dbPath, perm700); + run("CREATE DATABASE doo2 LOCATION '%s'", dbPath.toUri()); + + whFs.setPermission(dbPath, perm500); + runFail("DROP DATABASE doo2"); + } + + @Test + public void testDescSwitchDatabaseFail() throws Exception { + whFs.setPermission(whPath, perm700); + run("CREATE DATABASE doo"); + whFs.setPermission(getDbPath("doo"), perm300); //revoke read + runFail("DESCRIBE DATABASE doo"); + runFail("USE doo"); + + //custom location + Path dbPath = new Path(whPath, new Random().nextInt() + "/mydb"); + whFs.mkdirs(dbPath, perm700); + run("CREATE DATABASE doo2 LOCATION '%s'", dbPath.toUri()); + whFs.mkdirs(dbPath, perm300); //revoke read + 
runFail("DESCRIBE DATABASE doo2", dbPath.toUri()); + runFail("USE doo2"); + } + + @Test + public void testShowTablesFail() throws Exception { + whFs.setPermission(whPath, perm700); + run("CREATE DATABASE doo"); + run("USE doo"); + whFs.setPermission(getDbPath("doo"), perm300); //revoke read + runFail("SHOW TABLES"); + runFail("SHOW TABLE EXTENDED LIKE foo1"); + } + + @Test + public void testCreateDropDescTable() throws Exception { + //default db + run("CREATE TABLE foo1 (foo INT) STORED AS RCFILE"); + run("DESCRIBE foo1"); + run("DROP TABLE foo1"); + + //default db custom location + Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable"); + whFs.mkdirs(tablePath, perm700); + run("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath); + run("DESCRIBE foo2"); + run("DROP TABLE foo2"); + + //default db custom non existing location + run("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s/subpath'", tablePath); + run("DESCRIBE foo3"); + run("DROP TABLE foo3"); + + //non default db + run("CREATE DATABASE doo"); + run("USE doo"); + + run("CREATE TABLE foo4 (foo INT) STORED AS RCFILE"); + run("DESCRIBE foo4"); + run("DROP TABLE foo4"); + + //non-default db custom location + tablePath = new Path(whPath, new Random().nextInt() + "/mytable"); + whFs.mkdirs(tablePath, perm700); + run("CREATE EXTERNAL TABLE foo5 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath); + run("DESCRIBE foo5"); + run("DROP TABLE foo5"); + + //non-default db custom non existing location + run("CREATE EXTERNAL TABLE foo6 (foo INT) STORED AS RCFILE LOCATION '%s/subpath'", tablePath); + run("DESCRIBE foo6"); + run("DROP TABLE foo6"); + + run("DROP TABLE IF EXISTS foo_non_exists"); + + run("CREATE TABLE foo1 (foo INT) STORED AS RCFILE"); + run("DESCRIBE EXTENDED foo1"); + run("DESCRIBE FORMATTED foo1"); + run("DESCRIBE foo1.foo"); + + //deep non-existing path for the table + tablePath = new Path(whPath, new Random().nextInt() + "/mytable"); + whFs.mkdirs(tablePath, perm700); + run("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s/a/a/a/'", tablePath); + } + + @Test + public void testCreateTableFail1() throws Exception { + //default db + whFs.mkdirs(whPath, perm500); //revoke w + runFail("CREATE TABLE foo1 (foo INT) STORED AS RCFILE"); + } + + @Test + public void testCreateTableFail2() throws Exception { + //default db custom location + Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable"); + whFs.mkdirs(tablePath, perm500); + runFail("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath); + + //default db custom non existing location + runFail("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s/subpath'", tablePath); + } + + @Test + public void testCreateTableFail3() throws Exception { + //non default db + run("CREATE DATABASE doo"); + whFs.setPermission(getDbPath("doo"), perm500); + + runFail("CREATE TABLE doo.foo4 (foo INT) STORED AS RCFILE"); + + //non-default db custom location, permission to write to tablePath, but not on db path + Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable"); + whFs.mkdirs(tablePath, perm700); + runFail("CREATE EXTERNAL TABLE doo.foo5 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath); + } + + @Test + public void testCreateTableFail4() throws Exception { + //non default db + run("CREATE DATABASE doo"); + + //non-default db custom location + Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable"); + whFs.mkdirs(tablePath, 
perm500); + runFail("CREATE EXTERNAL TABLE doo.foo5 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath); + + //non-default db custom non existing location + runFail("CREATE EXTERNAL TABLE doo.foo6 (foo INT) STORED AS RCFILE LOCATION '%s/a/a/a/'", tablePath); + } + + @Test + public void testDropTableFail1() throws Exception { + //default db + run("CREATE TABLE foo1 (foo INT) STORED AS RCFILE"); + whFs.mkdirs(getTablePath("default", "foo1"), perm500); //revoke w + runFail("DROP TABLE foo1"); + } + + @Test + public void testDropTableFail2() throws Exception { + //default db custom location + Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable"); + run("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath); + whFs.mkdirs(tablePath, perm500); + runFail("DROP TABLE foo2"); + } + + @Test + public void testDropTableFail4() throws Exception { + //non default db + run("CREATE DATABASE doo"); + + //non-default db custom location + Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable"); + + run("CREATE EXTERNAL TABLE doo.foo5 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath); + whFs.mkdirs(tablePath, perm500); + run("USE doo"); //There is no DROP TABLE doo.foo5 support in Hive + runFail("DROP TABLE foo5"); + } + + @Test + public void testDescTableFail() throws Exception { + //default db + run("CREATE TABLE foo1 (foo INT) STORED AS RCFILE"); + whFs.mkdirs(getTablePath("default", "foo1"), perm300); //revoke read + runFail("DESCRIBE foo1"); + + //default db custom location + Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable"); + whFs.mkdirs(tablePath, perm700); + run("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath); + whFs.mkdirs(tablePath, perm300); //revoke read + runFail("DESCRIBE foo2"); + } + + @Test + public void testAlterTableRename() throws Exception { + run("CREATE TABLE foo1 (foo INT) STORED AS RCFILE"); + run("ALTER TABLE foo1 RENAME TO foo2"); + + Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable"); + run("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath); + run("ALTER TABLE foo3 RENAME TO foo4"); + } + + @Test + public void testAlterTableRenameFail() throws Exception { + run("CREATE TABLE foo1 (foo INT) STORED AS RCFILE"); + whFs.mkdirs(getTablePath("default", "foo1"), perm500); //revoke write + runFail("ALTER TABLE foo1 RENAME TO foo2"); + + Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable"); + run("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath); + whFs.mkdirs(tablePath, perm500); //revoke write + runFail("ALTER TABLE foo3 RENAME TO foo4"); + } + + @Test + public void testAlterTableRelocate() throws Exception { + run("CREATE TABLE foo1 (foo INT) STORED AS RCFILE"); + Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable"); + run("ALTER TABLE foo1 SET LOCATION '%s'", tablePath.makeQualified(whFs)); + + tablePath = new Path(whPath, new Random().nextInt() + "/mytable2"); + run("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s'", + tablePath.makeQualified(whFs)); + tablePath = new Path(whPath, new Random().nextInt() + "/mytable2"); + run("ALTER TABLE foo3 SET LOCATION '%s'", tablePath.makeQualified(whFs)); + } + + @Test + public void testAlterTableRelocateFail() throws Exception { + run("CREATE TABLE foo1 (foo INT) STORED AS RCFILE"); + Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable"); + whFs.mkdirs(tablePath, perm500); 
//revoke write + runFail("ALTER TABLE foo1 SET LOCATION '%s'", tablePath.makeQualified(whFs)); + + //dont have access to new table loc + tablePath = new Path(whPath, new Random().nextInt() + "/mytable2"); + run("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s'", + tablePath.makeQualified(whFs)); + tablePath = new Path(whPath, new Random().nextInt() + "/mytable2"); + whFs.mkdirs(tablePath, perm500); //revoke write + runFail("ALTER TABLE foo3 SET LOCATION '%s'", tablePath.makeQualified(whFs)); + + //have access to new table loc, but not old table loc + tablePath = new Path(whPath, new Random().nextInt() + "/mytable3"); + run("CREATE EXTERNAL TABLE foo4 (foo INT) STORED AS RCFILE LOCATION '%s'", + tablePath.makeQualified(whFs)); + whFs.mkdirs(tablePath, perm500); //revoke write + tablePath = new Path(whPath, new Random().nextInt() + "/mytable3"); + runFail("ALTER TABLE foo4 SET LOCATION '%s'", tablePath.makeQualified(whFs)); + } + + @Test + public void testAlterTable() throws Exception { + run("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE"); + run("ALTER TABLE foo1 SET TBLPROPERTIES ('foo'='bar')"); + run("ALTER TABLE foo1 SET SERDEPROPERTIES ('foo'='bar')"); + run("ALTER TABLE foo1 ADD COLUMNS (foo2 INT)"); + } + + @Test + public void testAddDropPartition() throws Exception { + run("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE"); + run("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10')"); + run("ALTER TABLE foo1 ADD IF NOT EXISTS PARTITION (b='2010-10-10')"); + String relPath = new Random().nextInt() + "/mypart"; + run("ALTER TABLE foo1 ADD PARTITION (b='2010-10-11') LOCATION '%s'", relPath); + + run("ALTER TABLE foo1 PARTITION (b='2010-10-10') SET FILEFORMAT RCFILE"); + + run("ALTER TABLE foo1 PARTITION (b='2010-10-10') SET FILEFORMAT INPUTFORMAT " + + "'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT " + + "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver " + + "'mydriver' outputdriver 'yourdriver'"); + + run("ALTER TABLE foo1 DROP PARTITION (b='2010-10-10')"); + run("ALTER TABLE foo1 DROP PARTITION (b='2010-10-11')"); + } + + @Test + public void testAddPartitionFail1() throws Exception { + run("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE"); + whFs.mkdirs(getTablePath("default", "foo1"), perm500); + runFail("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10')"); + } + + @Test + public void testAddPartitionFail2() throws Exception { + run("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE"); + String relPath = new Random().nextInt() + "/mypart"; + Path partPath = new Path(getTablePath("default", "foo1"), relPath); + whFs.mkdirs(partPath, perm500); + runFail("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10') LOCATION '%s'", partPath); + } + + @Test + public void testDropPartitionFail1() throws Exception { + run("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE"); + run("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10')"); + whFs.mkdirs(getPartPath("b=2010-10-10", "default", "foo1"), perm500); + runFail("ALTER TABLE foo1 DROP PARTITION (b='2010-10-10')"); + } + + @Test + public void testDropPartitionFail2() throws Exception { + run("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE"); + String relPath = new Random().nextInt() + "/mypart"; + Path partPath = new Path(getTablePath("default", "foo1"), relPath); + whFs.mkdirs(partPath, perm700); + run("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10') LOCATION 
'%s'", partPath); + whFs.mkdirs(partPath, perm500); //revoke write + runFail("ALTER TABLE foo1 DROP PARTITION (b='2010-10-10')"); + } + + @Test + public void testAlterTableFail() throws Exception { + run("CREATE TABLE foo1 (foo INT) PARTITIONED BY (boo STRING) STORED AS TEXTFILE"); + whFs.mkdirs(getTablePath("default", "foo1"), perm500); //revoke write + runFail("ALTER TABLE foo1 SET TBLPROPERTIES ('foo'='bar')"); + runFail("ALTER TABLE foo1 SET SERDEPROPERTIES ('foo'='bar')"); + runFail("ALTER TABLE foo1 ADD COLUMNS (foo2 INT)"); + } + + @Test + public void testShowTables() throws Exception { + run("CREATE TABLE foo1 (foo INT) PARTITIONED BY (boo STRING) STORED AS TEXTFILE"); + run("SHOW PARTITIONS foo1"); + + whFs.mkdirs(getTablePath("default", "foo1"), perm300); //revoke read + runFail("SHOW PARTITIONS foo1"); + } + + @Test + public void testAlterTablePartRelocate() throws Exception { + run("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS RCFILE"); + run("ALTER TABLE foo1 ADD PARTITION (b='2010-10-16')"); + Path partPath = new Path(whPath, new Random().nextInt() + "/mypart"); + run("ALTER TABLE foo1 PARTITION (b='2010-10-16') SET LOCATION '%s'", partPath.makeQualified(whFs)); + } + + @Test + public void testAlterTablePartRelocateFail() throws Exception { + run("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS RCFILE"); + + Path oldLoc = new Path(whPath, new Random().nextInt() + "/mypart"); + Path newLoc = new Path(whPath, new Random().nextInt() + "/mypart2"); + + run("ALTER TABLE foo1 ADD PARTITION (b='2010-10-16') LOCATION '%s'", oldLoc); + whFs.mkdirs(oldLoc, perm500); + runFail("ALTER TABLE foo1 PARTITION (b='2010-10-16') SET LOCATION '%s'", newLoc.makeQualified(whFs)); + whFs.mkdirs(oldLoc, perm700); + whFs.mkdirs(newLoc, perm500); + runFail("ALTER TABLE foo1 PARTITION (b='2010-10-16') SET LOCATION '%s'", newLoc.makeQualified(whFs)); + } + + //TODO: import / export + + public void run(String format, Object ... args) throws Exception { + String command = String.format(format, args); + CommandProcessorResponse resp = hcatDriver.run(command); + Assert.assertEquals(resp.getErrorMessage(), 0, resp.getResponseCode()); + Assert.assertEquals(resp.getErrorMessage(), null, resp.getErrorMessage()); + } + + /** Run the query expecting it to fail */ + public void runFail(String format, Object ... args) throws Exception { + String command = String.format(format, args); + CommandProcessorResponse resp = hcatDriver.run(command); + Assert.assertNotSame(resp.getErrorMessage(), 0, resp.getResponseCode()); + Assert.assertTrue(resp.getErrorMessage().contains("Permission denied")); + } + +}