diff --git a/hcatalog/src/test/e2e/templeton/tests/doas.conf b/hcatalog/src/test/e2e/templeton/tests/doas.conf
index 284168b..5d67968 100644
--- a/hcatalog/src/test/e2e/templeton/tests/doas.conf
+++ b/hcatalog/src/test/e2e/templeton/tests/doas.conf
@@ -109,7 +109,7 @@ $cfg =
      'method' => 'GET',
      'url' => ':TEMPLETON_URL:/templeton/v1/ddl/database/default/table/:UNAME:_doastab2/partition?user.name=:UNAME:&doAs=:DOAS:',
      'status_code' => 500,
-     'json_field_substr_match' => {'error' => 'FAILED: AuthorizationException java\.security\.AccessControlException: action READ not permitted on path .* for user :DOAS:'},
+     'json_field_substr_match' => {'error' => 'java\.security\.AccessControlException: Permission denied: user=:DOAS:, access=READ'},
     },
     {
@@ -118,7 +118,7 @@ $cfg =
      'method' => 'DELETE',
      'url' => ':TEMPLETON_URL:/templeton/v1/ddl/database/default/table/:UNAME:_doastab2?user.name=:UNAME:&doAs=:DOAS:',
      'status_code' => 500,
-     'json_field_substr_match' => {'error' => 'java\.security\.AccessControlException: action WRITE not permitted on path .* for user :DOAS:'},
+     'json_field_substr_match' => {'error' => 'java\.security\.AccessControlException: Permission denied: user=:DOAS:, access=READ'},
     },
     {
      #descbe the table....
diff --git a/hcatalog/src/test/e2e/templeton/tests/hcatperms.conf b/hcatalog/src/test/e2e/templeton/tests/hcatperms.conf
index 9b6d71f..b26bd1f 100644
--- a/hcatalog/src/test/e2e/templeton/tests/hcatperms.conf
+++ b/hcatalog/src/test/e2e/templeton/tests/hcatperms.conf
@@ -375,6 +375,13 @@ $cfg =
     {
      'method' => 'DELETE',
      'format_header' => 'Content-Type: application/json',
+     'url' => ':TEMPLETON_URL:/templeton/v1/ddl/database/hcatperms_:TNUM:/table/permstable_:TNUM:',
+     'user_name' => ':UNAME:',
+     'status_code' => 200,
+    },
+    {
+     'method' => 'DELETE',
+     'format_header' => 'Content-Type: application/json',
      'url' => ':TEMPLETON_URL:/templeton/v1/ddl/database/hcatperms_:TNUM:?ifExists=true&option=cascade',
      'user_name' => ':UNAME:',
      'status_code' => 200,
@@ -677,9 +684,7 @@ $cfg =
      'format_header' => 'Content-Type: application/json',
      'user_name' => ':UNAME_OTHER:',
      'status_code' => 500,
-     'json_field_substr_match' => {'error' => 'FAILED: AuthorizationException .*\.security\.AccessControlException: action READ not permitted on path .* for user :UNAME_OTHER:'},
-
-
+     'json_field_substr_match' => {'error' => 'AccessControlException: Permission denied: user=:UNAME_OTHER:, access=READ'},
     },

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 4941427..06266b3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -109,8 +109,8 @@ import org.apache.hadoop.hive.shims.HadoopShims;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.mapred.InputFormat;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.thrift.TException;
 
 import com.google.common.collect.Sets;
@@ -427,9 +427,9 @@ public void alterTable(String tblName, Table newTbl)
       newTbl.checkValidity();
       getMSC().alter_table(names[0], names[1], newTbl.getTTable());
     } catch (MetaException e) {
-      throw new HiveException("Unable to alter table.", e);
+      throw new HiveException("Unable to alter table. " + e.getMessage(), e);
     } catch (TException e) {
-      throw new HiveException("Unable to alter table.", e);
+      throw new HiveException("Unable to alter table. " + e.getMessage(), e);
     }
   }
@@ -455,9 +455,9 @@ public void alterIndex(String dbName, String baseTblName, String idxName, Index
     try {
       getMSC().alter_index(dbName, baseTblName, idxName, newIdx);
     } catch (MetaException e) {
-      throw new HiveException("Unable to alter index.", e);
+      throw new HiveException("Unable to alter index. " + e.getMessage(), e);
     } catch (TException e) {
-      throw new HiveException("Unable to alter index.", e);
+      throw new HiveException("Unable to alter index. " + e.getMessage(), e);
     }
   }
@@ -502,9 +502,9 @@ public void alterPartition(String dbName, String tblName, Partition newPart)
       getMSC().alter_partition(dbName, tblName, newPart.getTPartition());
     } catch (MetaException e) {
-      throw new HiveException("Unable to alter partition.", e);
+      throw new HiveException("Unable to alter partition. " + e.getMessage(), e);
     } catch (TException e) {
-      throw new HiveException("Unable to alter partition.", e);
+      throw new HiveException("Unable to alter partition. " + e.getMessage(), e);
     }
   }
@@ -534,9 +534,9 @@ public void alterPartitions(String tblName, List<Partition> newParts)
       }
       getMSC().alter_partitions(names[0], names[1], newTParts);
     } catch (MetaException e) {
-      throw new HiveException("Unable to alter partition.", e);
+      throw new HiveException("Unable to alter partition. " + e.getMessage(), e);
     } catch (TException e) {
-      throw new HiveException("Unable to alter partition.", e);
+      throw new HiveException("Unable to alter partition. " + e.getMessage(), e);
     }
   }
   /**
@@ -578,11 +578,11 @@ public void renamePartition(Table tbl, Map<String, String> oldPartSpec, Partitio
         newPart.getTPartition());
     } catch (InvalidOperationException e){
-      throw new HiveException("Unable to rename partition.", e);
+      throw new HiveException("Unable to rename partition. " + e.getMessage(), e);
     } catch (MetaException e) {
-      throw new HiveException("Unable to rename partition.", e);
+      throw new HiveException("Unable to rename partition. " + e.getMessage(), e);
     } catch (TException e) {
-      throw new HiveException("Unable to rename partition.", e);
+      throw new HiveException("Unable to rename partition. " + e.getMessage(), e);
     }
   }
@@ -591,11 +591,11 @@ public void alterDatabase(String dbName, Database db)
     try {
       getMSC().alterDatabase(dbName, db);
     } catch (MetaException e) {
-      throw new HiveException("Unable to alter database " + dbName, e);
+      throw new HiveException("Unable to alter database " + dbName + ". " + e.getMessage(), e);
     } catch (NoSuchObjectException e) {
       throw new HiveException("Database " + dbName + " does not exists.", e);
     } catch (TException e) {
-      throw new HiveException("Unable to alter database " + dbName, e);
+      throw new HiveException("Unable to alter database " + dbName + ". " + e.getMessage(), e);
     }
   }
   /**
@@ -870,9 +870,9 @@ public boolean dropIndex(String db_name, String tbl_name, String index_name, boo
     try {
       return getMSC().dropIndex(db_name, tbl_name, index_name, deleteData);
     } catch (NoSuchObjectException e) {
-      throw new HiveException("Partition or table doesn't exist.", e);
+      throw new HiveException("Partition or table doesn't exist. " + e.getMessage(), e);
     } catch (Exception e) {
-      throw new HiveException("Unknown error. Please check logs.", e);
+      throw new HiveException(e.getMessage(), e);
     }
   }
@@ -1041,7 +1041,7 @@ public Table getTable(final String dbName, final String tableName,
       }
       return null;
     } catch (Exception e) {
-      throw new HiveException("Unable to fetch table " + tableName, e);
+      throw new HiveException("Unable to fetch table " + tableName + ". " + e.getMessage(), e);
     }
 
     // For non-views, we need to do some extra fixes
@@ -1755,7 +1755,7 @@ public boolean dropPartition(String db_name, String tbl_name,
     } catch (NoSuchObjectException e) {
       throw new HiveException("Partition or table doesn't exist.", e);
     } catch (Exception e) {
-      throw new HiveException("Unknown error. Please check logs.", e);
+      throw new HiveException(e.getMessage(), e);
     }
   }
@@ -1784,7 +1784,7 @@ public boolean dropPartition(String db_name, String tbl_name,
     } catch (NoSuchObjectException e) {
       throw new HiveException("Partition or table doesn't exist.", e);
     } catch (Exception e) {
-      throw new HiveException("Unknown error. Please check logs.", e);
+      throw new HiveException(e.getMessage(), e);
     }
   }
@@ -2277,7 +2277,7 @@ public boolean accept(Path p) {
         result.add(srcToDest);
       }
     } catch (IOException e) {
-      throw new HiveException("checkPaths: filesystem error in check phase", e);
+      throw new HiveException("checkPaths: filesystem error in check phase. " + e.getMessage(), e);
     }
     return result;
   }
@@ -2344,7 +2344,7 @@ public static boolean renameFile(HiveConf conf, Path srcf, Path destf,
       try {
         ShimLoader.getHadoopShims().setFullFileStatus(conf, destStatus, fs, destf);
       } catch (IOException e) {
-        LOG.warn("Error setting permission of file " + destf + ": "+ StringUtils.stringifyException(e));
+        LOG.warn("Error setting permission of file " + destf + ": "+ e.getMessage(), e);
       }
     }
     return success;
@@ -2383,7 +2383,7 @@ static protected void copyFiles(HiveConf conf, Path srcf, Path destf,
       srcs = srcFs.globStatus(srcf);
     } catch (IOException e) {
       LOG.error(StringUtils.stringifyException(e));
-      throw new HiveException("addFiles: filesystem error in check phase", e);
+      throw new HiveException("addFiles: filesystem error in check phase. " + e.getMessage(), e);
     }
     if (srcs == null) {
       LOG.info("No sources specified to move: " + srcf);
@@ -2409,7 +2409,7 @@ static protected void copyFiles(HiveConf conf, Path srcf, Path destf,
         }
       }
     } catch (IOException e) {
-      throw new HiveException("copyFiles: error while moving files!!!", e);
+      throw new HiveException("copyFiles: error while moving files!!! " + e.getMessage(), e);
     }
   }
 }
@@ -2481,7 +2481,7 @@ private static void moveAcidFiles(FileSystem fs, FileStatus[] stats, Path dst)
           fs.rename(bucketSrc, bucketDest);
         }
       } catch (IOException e) {
-        throw new HiveException("Error moving acid files " , e);
+        throw new HiveException("Error moving acid files " + e.getMessage(), e);
      }
    }
  }
@@ -2713,7 +2713,7 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) throws
       throw new HiveException(e);
     }
   }
-  
+
   public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws HiveException {
     try {
       return getMSC().setPartitionColumnStatistics(request);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index 4ff9678..1a62ac5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -1234,7 +1234,7 @@ protected Database getDatabase(String dbName, boolean throwException) throws Sem
     try {
       database = db.getDatabase(dbName);
     } catch (Exception e) {
-      throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(dbName), e);
+      throw new SemanticException(e.getMessage(), e);
     }
     if (database == null && throwException) {
       throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(dbName));
@@ -1264,9 +1264,13 @@ protected Table getTable(String database, String tblName, boolean throwException
     try {
       tab = database == null ? db.getTable(tblName, false)
           : db.getTable(database, tblName, false);
-    } catch (Exception e) {
+    }
+    catch (InvalidTableException e) {
       throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName), e);
     }
+    catch (Exception e) {
+      throw new SemanticException(e.getMessage(), e);
+    }
     if (tab == null && throwException) {
       throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
     }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
index 9fc1aa0..34e6275 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
 import org.apache.hadoop.hive.ql.metadata.Table;
 
 /**
@@ -95,8 +96,10 @@ private Table getTable(ASTNode tree) throws SemanticException {
     String tableName = getUnescapedName((ASTNode) tree.getChild(0).getChild(0));
     try {
       return db.getTable(tableName);
+    } catch (InvalidTableException e) {
+      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName), e);
     } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
+      throw new SemanticException(e.getMessage(), e);
     }
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index eb20be3..a8420ae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -79,6 +79,7 @@ import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.HiveUtils;
+import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.authorization.AuthorizationParseUtils;
@@ -1718,7 +1719,8 @@ static public String getFullyQualifiedName(ASTNode ast) {
 
   // assume the first component of DOT delimited name is tableName
   // get the attemptTableName
-  static public String getAttemptTableName(Hive db, String qualifiedName, boolean isColumn) {
+  static public String getAttemptTableName(Hive db, String qualifiedName, boolean isColumn)
+      throws SemanticException {
     // check whether the name starts with table
     // DESCRIBE table
     // DESCRIBE table.column
@@ -1739,11 +1741,13 @@ static public String getAttemptTableName(Hive db, String qualifiedName, boolean
           return tableName;
         }
       }
-    } catch (HiveException e) {
+    } catch (InvalidTableException e) {
       // assume the first DOT delimited component is tableName
       // OK if it is not
       // do nothing when having exception
       return null;
+    } catch (HiveException e) {
+      throw new SemanticException(e.getMessage(), e);
     }
     return null;
   }
@@ -1824,7 +1828,7 @@ static public String getColPath(
       ASTNode parentAst,
       ASTNode ast,
       String tableName,
-      Map<String, String> partSpec) {
+      Map<String, String> partSpec) throws SemanticException {
 
     // if parent has two children
     // it could be DESCRIBE table key
@@ -1880,11 +1884,13 @@ static public String getColPath(
     Table tab = null;
     try {
       tab = db.getTable(tableName);
-    } catch (HiveException e) {
-      // if table not valid
-      // throw semantic exception
+    }
+    catch (InvalidTableException e) {
       throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName), e);
     }
+    catch (HiveException e) {
+      throw new SemanticException(e.getMessage(), e);
+    }
 
     if (partSpec != null) {
       Partition part = null;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 41c75ef..25ef54b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -102,6 +102,7 @@ import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.HiveUtils;
+import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
@@ -10845,9 +10846,13 @@ private void validateAnalyzeNoscan(ASTNode tree) throws SemanticException {
     Table tbl;
     try {
       tbl = db.getTable(tableName);
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
+    } catch (InvalidTableException e) {
+      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName), e);
+    }
+    catch (HiveException e) {
+      throw new SemanticException(e.getMessage(), e);
     }
+
     /* noscan uses hdfs apis to retrieve such information from Namenode.      */
     /* But that will be specific to hdfs. Through storagehandler mechanism,   */
     /* storage of table could be on any storage system: hbase, cassandra etc. */
@@ -10870,8 +10875,10 @@ private void validateAnalyzePartialscan(ASTNode tree) throws SemanticException {
     Table tbl;
     try {
       tbl = db.getTable(tableName);
+    } catch (InvalidTableException e) {
+      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName), e);
     } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
+      throw new SemanticException(e.getMessage(), e);
     }
     /* partialscan uses hdfs apis to retrieve such information from Namenode. */
     /* But that will be specific to hdfs. Through storagehandler mechanism,   */
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
index 82c8333..a2ad8fd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
@@ -17,6 +17,13 @@
  */
 package org.apache.hadoop.hive.ql.parse;
 
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.Context;
@@ -27,19 +34,12 @@
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.lib.Node;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.session.SessionState;
 
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-
 /**
  * A subclass of the {@link org.apache.hadoop.hive.ql.parse.SemanticAnalyzer} that just handles
  * update and delete statements. It works by rewriting the updates and deletes into insert
@@ -128,11 +128,16 @@ private void reparseAndSuperAnalyze(ASTNode tree) throws SemanticException {
     Table mTable;
     try {
       mTable = db.getTable(tableName[0], tableName[1]);
+    } catch (InvalidTableException e) {
+      LOG.error("Failed to find table " + getDotName(tableName) + " got exception " +
+          e.getMessage());
+      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(getDotName(tableName)), e);
     } catch (HiveException e) {
-      LOG.error("Failed to find table " + getDotName(tableName) + " got exception " +
-          e.getMessage());
-      throw new SemanticException(ErrorMsg.INVALID_TABLE, getDotName(tableName));
+      LOG.error("Failed to find table " + getDotName(tableName) + " got exception " +
+          e.getMessage());
+      throw new SemanticException(e.getMessage(), e);
     }
+
     List<FieldSchema> partCols = mTable.getPartCols();
 
     rewrittenQueryStr.append("insert into table ");