diff --git hcatalog/core/src/main/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java hcatalog/core/src/main/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java
index 3393a32..f5cf66e 100644
--- hcatalog/core/src/main/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java
+++ hcatalog/core/src/main/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java
@@ -81,7 +81,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast)
   @Override
   public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-      List> rootTasks) throws SemanticException {
+      List> rootTasks) throws HiveException {
     context.getConf().set(HCatConstants.HCAT_CREATE_DB_NAME, databaseName);
     super.postAnalyze(context, rootTasks);
   }
diff --git hcatalog/core/src/main/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java hcatalog/core/src/main/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
index 36a6e0c..9beda39 100644
--- hcatalog/core/src/main/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
+++ hcatalog/core/src/main/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
@@ -56,7 +56,7 @@
   @Override
   public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context,
-      ASTNode ast) throws SemanticException {
+      ASTNode ast) throws HiveException {
     Hive db;
     try {
diff --git hcatalog/core/src/main/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java hcatalog/core/src/main/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
index 1d4a9a1..e75b90e 100644
--- hcatalog/core/src/main/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
+++ hcatalog/core/src/main/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
@@ -61,7 +61,7 @@
   @Override
   public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast)
-    throws SemanticException {
+    throws HiveException {
     this.ast = ast;
     switch (ast.getToken().getType()) {
@@ -159,7 +159,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast)
   @Override
   public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-      List> rootTasks) throws SemanticException {
+      List> rootTasks) throws HiveException {
     try {
diff --git hcatalog/core/src/main/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java hcatalog/core/src/main/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java
index eb40e22..b0038ec 100644
--- hcatalog/core/src/main/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java
+++ hcatalog/core/src/main/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java
@@ -57,7 +57,7 @@ public HiveAuthorizationProvider getAuthProvider() {
   @Override
   public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-      List> rootTasks) throws SemanticException {
+      List> rootTasks) throws HiveException {
     super.postAnalyze(context, rootTasks);
     //Authorize the operation.
diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java
index 887a64d..a5e1842 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java
@@ -78,7 +78,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast)
   @Override
   public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-      List> rootTasks) throws SemanticException {
+      List> rootTasks) throws HiveException {
     context.getConf().set(HCatConstants.HCAT_CREATE_DB_NAME, databaseName);
     super.postAnalyze(context, rootTasks);
   }
diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
index ec24531..eb50ab7 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
@@ -52,7 +52,7 @@
   @Override
   public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context,
-      ASTNode ast) throws SemanticException {
+      ASTNode ast) throws HiveException {
     Hive db;
     try {
diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
index 97973db..d72a89d 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
@@ -58,7 +58,7 @@
   @Override
   public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast)
-    throws SemanticException {
+    throws HiveException {
     this.ast = ast;
     switch (ast.getToken().getType()) {
@@ -156,7 +156,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast)
   @Override
   public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-      List> rootTasks) throws SemanticException {
+      List> rootTasks) throws HiveException {
     try {
diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java
index 0184dc0..c118c6d 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java
@@ -56,7 +56,7 @@ public HiveAuthorizationProvider getAuthProvider() {
   @Override
   public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-      List> rootTasks) throws SemanticException {
+      List> rootTasks) throws HiveException {
     super.postAnalyze(context, rootTasks);
     //Authorize the operation.
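The ql/ changes that follow repeat the same two moves: semantic-analysis hook, graph-walker, and operator signatures are widened from SemanticException to HiveException, and the boilerplate catch blocks that rethrew arbitrary exceptions as new HiveException(e) are routed through the HiveException.wrap(Throwable) helper added to HiveException.java further down. Below is a minimal standalone sketch of that wrapping idiom; AppException and WrapIdiomDemo are stand-ins invented for illustration (the real helper lives on org.apache.hadoop.hive.ql.metadata.HiveException), but the ternary in wrap mirrors the patch: an exception that already has the target type passes through untouched, anything else is wrapped exactly once.

// Standalone illustration of the wrap(Throwable) idiom introduced by this patch.
// AppException stands in for HiveException; nothing here is part of the patch itself.
public class WrapIdiomDemo {

  static class AppException extends Exception {
    AppException(String msg) { super(msg); }
    AppException(Throwable cause) { super(cause); }

    // Mirrors HiveException.wrap: return the exception as-is if it already has
    // the target type, otherwise wrap it once.
    static AppException wrap(Throwable t) {
      return t instanceof AppException ? (AppException) t : new AppException(t);
    }
  }

  static void doWork(boolean alreadyTyped) throws Exception {
    if (alreadyTyped) {
      throw new AppException("typed failure");            // should pass through unchanged
    }
    throw new java.io.IOException("low-level failure");   // should be wrapped
  }

  static void caller(boolean alreadyTyped) throws AppException {
    try {
      doWork(alreadyTyped);
    } catch (Exception e) {
      // One catch block replaces the old pair:
      //   catch (HiveException e) { throw e; }
      //   catch (Exception e)     { throw new HiveException(e); }
      throw AppException.wrap(e);
    }
  }

  public static void main(String[] args) {
    for (boolean typed : new boolean[] {true, false}) {
      try {
        caller(typed);
      } catch (AppException e) {
        System.out.println("caught: " + e + ", cause: " + e.getCause());
      }
    }
  }
}

The catch-and-rethrow pairs removed below in ReduceSinkOperator, VectorGroupByOperator, PTFRowContainer, and RowContainer are exactly this simplification applied to HiveException.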
diff --git ql/src/java/org/apache/hadoop/hive/ql/Driver.java ql/src/java/org/apache/hadoop/hive/ql/Driver.java index 72c04d3..1ad7faa 100644 --- ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -139,7 +139,7 @@ private String userName; - private boolean checkConcurrency() throws SemanticException { + private boolean checkConcurrency() throws HiveException { boolean supportConcurrency = conf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY); if (!supportConcurrency) { LOG.info("Concurrency mode is disabled, not creating a lock manager"); @@ -153,7 +153,7 @@ private boolean checkConcurrency() throws SemanticException { return true; } - private void createLockManager() throws SemanticException { + private void createLockManager() throws HiveException { if (hiveLockMgr != null) { return; } @@ -711,7 +711,7 @@ public QueryPlan getPlan() { * SHARED mode. */ private List getLockObjects(Database d, Table t, Partition p, HiveLockMode mode) - throws SemanticException { + throws HiveException { List locks = new LinkedList(); HiveLockObjectData lockData = @@ -758,16 +758,12 @@ public QueryPlan getPlan() { String[] nameValue = partn.split("="); assert(nameValue.length == 2); partialSpec.put(nameValue[0], nameValue[1]); - try { - locks.add(new HiveLockObj( - new HiveLockObject(new DummyPartition(p.getTable(), p.getTable().getDbName() - + "/" + p.getTable().getTableName() - + "/" + partialName, - partialSpec), lockData), mode)); - partialName += "/"; - } catch (HiveException e) { - throw new SemanticException(e.getMessage()); - } + locks.add(new HiveLockObj( + new HiveLockObject(new DummyPartition(p.getTable(), p.getTable().getDbName() + + "/" + p.getTable().getTableName() + + "/" + partialName, + partialSpec), lockData), mode)); + partialName += "/"; } locks.add(new HiveLockObj(new HiveLockObject(p.getTable(), lockData), mode)); @@ -881,14 +877,7 @@ else if (output.getTyp() == WriteEntity.Type.DUMMYPARTITION) { } return (0); - } catch (SemanticException e) { - errorMessage = "FAILED: Error in acquiring locks: " + e.getMessage(); - SQLState = ErrorMsg.findSQLState(e.getMessage()); - downstreamError = e; - console.printError(errorMessage, "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); - return (10); - } catch (LockException e) { + } catch (HiveException e) { errorMessage = "FAILED: Error in acquiring locks: " + e.getMessage(); SQLState = ErrorMsg.findSQLState(e.getMessage()); downstreamError = e; @@ -1046,7 +1035,7 @@ private CommandProcessorResponse runInternal(String command, boolean alreadyComp boolean ckLock = false; try { ckLock = checkConcurrency(); - } catch (SemanticException e) { + } catch (HiveException e) { errorMessage = "FAILED: Error in semantic analysis: " + e.getMessage(); SQLState = ErrorMsg.findSQLState(e.getMessage()); downstreamError = e; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 0813bf3..b4ecfdd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -501,7 +501,7 @@ private int grantOrRevokeRole(GrantRevokeRoleDDL grantOrRevokeRoleDDL) } } } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } return 0; } @@ -599,7 +599,7 @@ private int showGrants(ShowGrantDesc showGrantDesc) throws HiveException { return 1; } catch (Exception e) { e.printStackTrace(); - throw new HiveException(e); + throw HiveException.wrap(e); 
} return 0; } @@ -1329,7 +1329,7 @@ private int archive(Hive db, AlterTableSimpleDesc simpleDesc, conf.setVar(HiveConf.ConfVars.HADOOPJOBNAME, jobname); ret = shim.createHadoopArchive(conf, originalDir, tmpPath, archiveName); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } if (ret != 0) { throw new HiveException("Error while creating HAR"); @@ -1555,7 +1555,7 @@ private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc) ret = ToolRunner.run(fss, args.toArray(new String[0])); } catch (Exception e) { e.printStackTrace(); - throw new HiveException(e); + throw HiveException.wrap(e); } if (ret != 0) { @@ -1571,7 +1571,7 @@ private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc) } fs.rename(tmpPath, intermediateExtractedDir); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -2038,7 +2038,7 @@ else if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC) { LOG.info("show create table: " + stringifyException(e)); return 1; } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } finally { IOUtils.closeStream((FSDataOutputStream) outStream); } @@ -2267,7 +2267,7 @@ private int showFunctions(ShowFunctionsDesc showFuncs) throws HiveException { LOG.warn("show function: " + stringifyException(e)); return 1; } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } finally { IOUtils.closeStream((FSDataOutputStream) outStream); } @@ -2600,7 +2600,7 @@ private int describeFunction(DescFunctionDesc descFunc) throws HiveException { LOG.warn("describe function: " + stringifyException(e)); return 1; } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } finally { IOUtils.closeStream((FSDataOutputStream) outStream); } @@ -2750,7 +2750,7 @@ private int showTableProperties(Hive db, ShowTblPropertiesDesc showTblPrpt) thro LOG.info("show table properties: " + stringifyException(e)); return 1; } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } return 0; @@ -3373,7 +3373,7 @@ private int dropTable(Hive db, DropTableDesc dropTbl) try { partitions = db.getPartitionsByFilter(tbl, partSpec.toString()); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } else { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DefaultFetchFormatter.java ql/src/java/org/apache/hadoop/hive/ql/exec/DefaultFetchFormatter.java index 25385ba..f734561 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DefaultFetchFormatter.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DefaultFetchFormatter.java @@ -45,7 +45,7 @@ public void initialize(Configuration hconf, Properties props) throws HiveExcepti try { mSerde = initializeSerde(hconf, props); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index a314ce7..695be81 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -334,7 +334,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { throw e; } catch (Exception e) { e.printStackTrace(); - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -447,7 +447,7 @@ protected void createBucketFiles(FSPaths fsp) throws HiveException { } catch (Exception e) { 
e.printStackTrace(); - throw new HiveException(e); + throw HiveException.wrap(e); } LOG.info("New Final Path: FS " + fsp.finalPaths[filesIdx]); @@ -486,7 +486,7 @@ protected void createBucketFiles(FSPaths fsp) throws HiveException { throw e; } catch (Exception e) { e.printStackTrace(); - throw new HiveException(e); + throw HiveException.wrap(e); } filesCreated = true; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java index 516ba42..4fa6a6f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java @@ -78,7 +78,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { conditionInspector = null; ioContext = IOContext.get(); } catch (Throwable e) { - throw new HiveException(e); + throw HiveException.wrap(e); } initializeChildren(hconf); } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java index 96a78fc..c651525 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java @@ -921,7 +921,7 @@ public static boolean implicitConvertable(TypeInfo from, TypeInfo to) { @SuppressWarnings("deprecation") public static GenericUDAFEvaluator getGenericUDAFEvaluator(String name, List argumentOIs, boolean isDistinct, - boolean isAllColumns) throws SemanticException { + boolean isAllColumns) throws HiveException { GenericUDAFResolver udafResolver = getGenericUDAFResolver(name); if (udafResolver == null) { @@ -951,7 +951,7 @@ public static GenericUDAFEvaluator getGenericUDAFEvaluator(String name, @SuppressWarnings("deprecation") public static GenericUDAFEvaluator getGenericWindowingEvaluator(String name, List argumentOIs, boolean isDistinct, - boolean isAllColumns) throws SemanticException { + boolean isAllColumns) throws HiveException { WindowFunctionInfo finfo = windowFunctions.get(name.toLowerCase()); if (finfo == null) { return null;} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java index c4c85fa..05ea169 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java @@ -805,7 +805,7 @@ public void processOp(Object row, int tag) throws HiveException { } catch (HiveException e) { throw e; } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -1096,7 +1096,7 @@ public void flush() throws HiveException{ // Just do nothing here. 
} } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -1138,7 +1138,7 @@ public void closeOp(boolean abort) throws HiveException { flush(); } } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java index 3e17ae7..a5fafe4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java @@ -122,7 +122,7 @@ public void processOp(Object row, int tag) throws HiveException { storage[alias].add(nr); } catch (Exception e) { e.printStackTrace(); - throw new HiveException(e); + throw HiveException.wrap(e); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ListSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/ListSinkOperator.java index dcc19f7..23809bc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/ListSinkOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ListSinkOperator.java @@ -47,7 +47,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { try { fetcher = initializeFetcher(hconf); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } super.initializeOp(hconf); } @@ -87,7 +87,7 @@ public void processOp(Object row, int tag) throws HiveException { res.add(fetcher.convert(row, inputObjInspectors[0])); numRows++; } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java index 5b6dd6a..bad19fa 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java @@ -316,7 +316,7 @@ else if (partRawRowObjectInspector.equals(tblRawRowObjectInspector)) { } } } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } return tableDescOI; } @@ -386,7 +386,7 @@ public void setChildren(Configuration hconf) throws HiveException { // we found all the operators that we are supposed to process. setChildOperators(children); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java index aa96d23..40e407f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java @@ -32,7 +32,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -677,10 +676,10 @@ public void removeChild(Operator child) { * Remove a child and add all of the child's children to the location of the child * * @param child If this operator is not the only parent of the child. There can be unpredictable result. 
- * @throws SemanticException + * @throws HiveException */ public void removeChildAndAdoptItsChildren( - Operator child) throws SemanticException { + Operator child) throws HiveException { int childIndex = childOperators.indexOf(child); if (childIndex == -1) { throw new SemanticException( diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java index 24a812d..f9e26ea 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java @@ -140,7 +140,7 @@ public OpTuple(Class descClass, Class> opClass) { return op; } catch (Exception e) { e.printStackTrace(); - throw new HiveException(e); + throw HiveException.wrap(e); } } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java index c378dc7..0732e11 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java @@ -281,10 +281,8 @@ public void processOp(Object row, int tag) throws HiveException { hiveKey.setHashCode(hashCode); collect(hiveKey, value); } - } catch (HiveException e) { - throw e; } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 3262c9f..902e644 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -152,7 +152,6 @@ import org.apache.hadoop.hive.ql.stats.StatsFactory; import org.apache.hadoop.hive.ql.stats.StatsPublisher; import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.Serializer; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; import org.apache.hadoop.hive.shims.ShimLoader; @@ -1746,12 +1745,8 @@ private static void createEmptyBuckets(Configuration hconf, ArrayList pa serializer.initialize(null, tableInfo.getProperties()); outputClass = serializer.getSerializedClass(); hiveOutputFormat = conf.getTableInfo().getOutputFileFormatClass().newInstance(); - } catch (SerDeException e) { - throw new HiveException(e); - } catch (InstantiationException e) { - throw new HiveException(e); - } catch (IllegalAccessException e) { - throw new HiveException(e); + } catch (Exception e) { + throw HiveException.wrap(e); } for (String p : paths) { @@ -2009,7 +2004,7 @@ public static String formatBinaryString(byte[] array, int start, int length) { } public static void validateColumnNames(List colNames, List checkCols) - throws SemanticException { + throws HiveException { Iterator checkColsIter = checkCols.iterator(); while (checkColsIter.hasNext()) { String toCheck = checkColsIter.next(); @@ -2528,10 +2523,10 @@ public static double showTime(long time) { * @param task * @param reworkMapredWork * @param conf - * @throws SemanticException + * @throws HiveException */ public static void reworkMapRedWork(Task task, - boolean reworkMapredWork, HiveConf conf) throws SemanticException { + boolean reworkMapredWork, HiveConf conf) throws HiveException { if (reworkMapredWork && (task instanceof MapRedTask)) { try { MapredWork mapredWork = ((MapRedTask) task).getWork(); @@ -2552,7 +2547,7 @@ public static void reworkMapRedWork(Task task, } } } catch (IOException e) { - throw new 
SemanticException(e); + throw new HiveException(e); } } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HashTableLoader.java ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HashTableLoader.java index efe5710..b6f788c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HashTableLoader.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HashTableLoader.java @@ -97,7 +97,7 @@ public void load(ExecMapperContext context, } } } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java index 9b2babc..39a99a3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java @@ -209,10 +209,7 @@ private void readBlock(int blockNum) throws HiveException { } catch(Exception e) { clear(); LOG.error(e.toString(), e); - if ( e instanceof HiveException ) { - throw (HiveException) e; - } - throw new HiveException(e); + throw HiveException.wrap(e); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java index 3b0bc2a..155efe5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java @@ -228,7 +228,7 @@ public ROW first() throws HiveException { removeKeys(ret); return ret; } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -318,10 +318,7 @@ private void spillBlock(ROW[] block, int length) throws HiveException { } catch (Exception e) { clear(); LOG.error(e.toString(), e); - if ( e instanceof HiveException ) { - throw (HiveException) e; - } - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -376,7 +373,7 @@ protected boolean nextBlock(int readIntoOffset) throws HiveException { } catch (HiveException e1) { LOG.error(e.getMessage(), e); } - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -421,7 +418,7 @@ public void clear() throws HiveException { } } catch (Exception e) { LOG.error(e.toString()); - throw new HiveException(e); + throw HiveException.wrap(e); } finally { rw = null; rr = null; @@ -526,7 +523,7 @@ protected void setupWriter() throws HiveException { } catch (Exception e) { clear(); LOG.error(e.toString(), e); - throw new HiveException(e); + throw HiveException.wrap(e); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java index 2df8ab9..1f2860d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java @@ -91,12 +91,8 @@ public void load(ExecMapperContext context, } mapJoinTables[pos] = tableContainer; - } catch (IOException e) { - throw new HiveException(e); - } catch (SerDeException e) { - throw new HiveException(e); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorExpressionDescriptor.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorExpressionDescriptor.java index 59758d1..fb72242 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorExpressionDescriptor.java +++ 
ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorExpressionDescriptor.java @@ -199,7 +199,7 @@ private Descriptor(Mode mode, int argCount, ArgumentType[] argTypes, InputExpres return ve; } } catch (Exception ex) { - throw new HiveException(ex); + throw HiveException.wrap(ex); } } return null; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java index 907ace1..34c786e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java @@ -63,7 +63,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { statsMap.put(Counter.FILTERED, filtered_count); statsMap.put(Counter.PASSED, passed_count); } catch (Throwable e) { - throw new HiveException(e); + throw HiveException.wrap(e); } if (conditionEvaluator instanceof ConstantVectorExpression) { ConstantVectorExpression cve = (ConstantVectorExpression) this.conditionEvaluator; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java index 4568496..b62bbfe 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java @@ -184,10 +184,8 @@ protected void initializeOp(Configuration hconf) throws HiveException { outputObjInspector = ObjectInspectorFactory.getStandardStructObjectInspector( outputFieldNames, objectInspectors); - } catch (HiveException he) { - throw he; } catch (Throwable e) { - throw new HiveException(e); + throw HiveException.wrap(e); } computeMemoryLimits(); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java index 661725e..aa57ace 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java @@ -190,7 +190,7 @@ public void assign(VectorExpressionWriter[] writers, reducerHash.initialize(limit, memUsage, conf.isMapGroupBy(), this); } } catch(Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java index f5ab731..10dac4d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java @@ -518,7 +518,7 @@ private VectorExpression createVectorExpression(Class vectorClass, List stack, NodeProcessorCtx procCtx, Object... nodeOutputs) - throws SemanticException { + throws HiveException { // We can only push down stuff which appears as part of // a pure conjunction: reject OR, CASE, etc. 
@@ -147,7 +147,7 @@ public Object process(Node nd, Stack stack, HashMap nodeOutput = new HashMap(); try { ogw.startWalking(topNodes, nodeOutput); - } catch (SemanticException ex) { + } catch (HiveException ex) { throw new RuntimeException(ex); } ExprNodeDesc residualPredicate = (ExprNodeDesc) nodeOutput.get(predicate); diff --git ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java index 4a77645..547def3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java @@ -94,7 +94,7 @@ } return indexBuilderTasks; } catch (Exception e) { - throw new SemanticException(e); + throw HiveException.wrap(e); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java index 95db96b..4156ad6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java @@ -223,7 +223,7 @@ public static boolean checkInputFormat(FileSystem fs, HiveConf conf, } return checkerInstance.validateInput(fs, conf, files); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } return true; @@ -282,7 +282,7 @@ public static FSRecordWriter getHiveRecordWriter(JobConf jc, return getRecordWriter(jc_output, hiveOutputFormat, outputClass, isCompressed, tableInfo.getProperties(), outPath, reporter); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/lib/CompositeProcessor.java ql/src/java/org/apache/hadoop/hive/ql/lib/CompositeProcessor.java index d7d5e80..2fe168d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lib/CompositeProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/lib/CompositeProcessor.java @@ -19,7 +19,7 @@ import java.util.Stack; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; /** * CompositeProcessor. Holds a list of node processors to be fired by the same @@ -36,7 +36,7 @@ public CompositeProcessor(NodeProcessor...nodeProcessors) { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... 
nodeOutputs) - throws SemanticException { + throws HiveException { for (NodeProcessor proc: procs) { proc.process(nd, stack, procCtx, nodeOutputs); } diff --git ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java index 7d5983a..c5d6da9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java +++ ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java @@ -25,7 +25,7 @@ import java.util.Set; import java.util.Stack; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; /** * base class for operator graph walker this class takes list of starting ops @@ -72,16 +72,16 @@ public DefaultGraphWalker(Dispatcher disp) { * node being walked * @param ndStack * stack of nodes encountered - * @throws SemanticException + * @throws HiveException */ - public void dispatch(Node nd, Stack ndStack) throws SemanticException { + public void dispatch(Node nd, Stack ndStack) throws HiveException { dispatchAndReturn(nd, ndStack); } /** * Returns dispatch result */ - public T dispatchAndReturn(Node nd, Stack ndStack) throws SemanticException { + public T dispatchAndReturn(Node nd, Stack ndStack) throws HiveException { Object[] nodeOutputs = null; if (nd.getChildren() != null) { nodeOutputs = new Object[nd.getChildren().size()]; @@ -99,10 +99,10 @@ public void dispatch(Node nd, Stack ndStack) throws SemanticException { /** * starting point for walking. * - * @throws SemanticException + * @throws HiveException */ public void startWalking(Collection startNodes, - HashMap nodeOutput) throws SemanticException { + HashMap nodeOutput) throws HiveException { toWalk.addAll(startNodes); while (toWalk.size() > 0) { Node nd = toWalk.remove(0); @@ -118,9 +118,9 @@ public void startWalking(Collection startNodes, * * @param nd * current operator in the graph - * @throws SemanticException + * @throws HiveException */ - public void walk(Node nd) throws SemanticException { + public void walk(Node nd) throws HiveException { if (opStack.empty() || nd != opStack.peek()) { opStack.push(nd); } diff --git ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultRuleDispatcher.java ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultRuleDispatcher.java index 8643563..c5afecc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultRuleDispatcher.java +++ ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultRuleDispatcher.java @@ -21,7 +21,7 @@ import java.util.Map; import java.util.Stack; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; /** * Dispatches calls to relevant method in processor. The user registers various @@ -58,11 +58,11 @@ public DefaultRuleDispatcher(NodeProcessor defaultProc, * operator to process * @param ndStack * the operators encountered so far - * @throws SemanticException + * @throws HiveException */ @Override public Object dispatch(Node nd, Stack ndStack, Object... 
nodeOutputs) - throws SemanticException { + throws HiveException { // find the firing rule // find the rule from the stack specified diff --git ql/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java ql/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java index ccbeadf..febccf6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java +++ ql/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java @@ -20,7 +20,7 @@ import java.util.Stack; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; /** * Dispatcher interface for Operators Used in operator graph walking to dispatch @@ -39,9 +39,9 @@ * The argument list of outputs from processing other nodes that are * passed to this dispatcher from the walker. * @return Object The return object from the processing call. - * @throws SemanticException + * @throws HiveException */ Object dispatch(Node nd, Stack stack, Object... nodeOutputs) - throws SemanticException; + throws HiveException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java ql/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java index ef09365..085b5d6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java +++ ql/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java @@ -21,7 +21,7 @@ import java.util.Collection; import java.util.HashMap; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; /** * Interface for operator graph walker. @@ -36,9 +36,9 @@ * @param nodeOutput * If this parameter is not null, the call to the function returns * the map from node to objects returned by the processors. - * @throws SemanticException + * @throws HiveException */ void startWalking(Collection startNodes, - HashMap nodeOutput) throws SemanticException; + HashMap nodeOutput) throws HiveException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java index bd83c88..16fb19a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java @@ -19,7 +19,7 @@ import java.util.Stack; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; /** * Base class for processing operators which is no-op. The specific processors @@ -37,8 +37,8 @@ * @param nodeOutputs * A variable argument list of outputs from other nodes in the walk * @return Object to be returned by the process call - * @throws SemanticException + * @throws HiveException */ Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException; + Object... 
nodeOutputs) throws HiveException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderWalker.java ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderWalker.java index 9e4612d..9c8824a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderWalker.java +++ ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderWalker.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.ql.lib; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; /** * base class for operator graph walker this class takes list of starting ops @@ -46,10 +46,10 @@ public PreOrderWalker(Dispatcher disp) { * * @param nd * current operator in the graph - * @throws SemanticException + * @throws HiveException */ @Override - public void walk(Node nd) throws SemanticException { + public void walk(Node nd) throws HiveException { opStack.push(nd); dispatch(nd, opStack); diff --git ql/src/java/org/apache/hadoop/hive/ql/lib/Rule.java ql/src/java/org/apache/hadoop/hive/ql/lib/Rule.java index 29cd113..a56ea30 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lib/Rule.java +++ ql/src/java/org/apache/hadoop/hive/ql/lib/Rule.java @@ -20,7 +20,7 @@ import java.util.Stack; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; /** * Rule interface for Operators Used in operator dispatching to dispatch @@ -31,9 +31,9 @@ /** * @return the cost of the rule - the lower the cost, the better the rule * matches - * @throws SemanticException + * @throws HiveException */ - int cost(Stack stack) throws SemanticException; + int cost(Stack stack) throws HiveException; /** * @return the name of the rule - may be useful for debugging diff --git ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java index 5e5c054..56505af 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java +++ ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java @@ -20,7 +20,7 @@ import java.util.Stack; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; /** * Implentation of the Rule interface for Nodes Used in Node dispatching to dispatch @@ -60,9 +60,9 @@ public RuleExactMatch(String ruleName, String pattern) { * @param stack * Node stack encountered so far * @return cost of the function - * @throws SemanticException + * @throws HiveException */ - public int cost(Stack stack) throws SemanticException { + public int cost(Stack stack) throws HiveException { int numElems = (stack != null ? 
stack.size() : 0); String name = new String(); for (int pos = numElems - 1; pos >= 0; pos--) { diff --git ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java index ddc96c2..369aa1e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java +++ ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java @@ -22,7 +22,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; /** * Rule interface for Nodes Used in Node dispatching to dispatch process/visitor @@ -56,10 +56,10 @@ public RuleRegExp(String ruleName, String regExp) { * @param stack * Node stack encountered so far * @return cost of the function - * @throws SemanticException + * @throws HiveException */ @Override - public int cost(Stack stack) throws SemanticException { + public int cost(Stack stack) throws HiveException { int numElems = (stack != null ? stack.size() : 0); String name = ""; for (int pos = numElems - 1; pos >= 0; pos--) { diff --git ql/src/java/org/apache/hadoop/hive/ql/lib/TaskGraphWalker.java ql/src/java/org/apache/hadoop/hive/ql/lib/TaskGraphWalker.java index c01d018..0b75416 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lib/TaskGraphWalker.java +++ ql/src/java/org/apache/hadoop/hive/ql/lib/TaskGraphWalker.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hive.ql.exec.ConditionalTask; import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.SemanticException; /** @@ -92,9 +93,9 @@ public TaskGraphWalker(Dispatcher disp) { * node being walked * @param ndStack * stack of nodes encountered - * @throws SemanticException + * @throws HiveException */ - public void dispatch(Node nd, Stack ndStack,TaskGraphWalkerContext walkerCtx) throws SemanticException { + public void dispatch(Node nd, Stack ndStack,TaskGraphWalkerContext walkerCtx) throws HiveException { Object[] nodeOutputs = null; if (nd.getChildren() != null) { nodeOutputs = new Object[nd.getChildren().size()+1]; @@ -112,7 +113,7 @@ public void dispatch(Node nd, Stack ndStack,TaskGraphWalkerContext walkerC retMap.put(nd, retVal); } - public void dispatch(Node nd, Stack ndStack) throws SemanticException { + public void dispatch(Node nd, Stack ndStack) throws HiveException { Object[] nodeOutputs = null; if (nd.getChildren() != null) { nodeOutputs = new Object[nd.getChildren().size()]; @@ -129,10 +130,10 @@ public void dispatch(Node nd, Stack ndStack) throws SemanticException { /** * starting point for walking. 
* - * @throws SemanticException + * @throws HiveException */ public void startWalking(Collection startNodes, - HashMap nodeOutput) throws SemanticException { + HashMap nodeOutput) throws HiveException { toWalk.addAll(startNodes); while (toWalk.size() > 0) { Node nd = toWalk.remove(0); @@ -148,9 +149,9 @@ public void startWalking(Collection startNodes, * * @param nd * current operator in the graph - * @throws SemanticException + * @throws HiveException */ - public void walk(Node nd) throws SemanticException { + public void walk(Node nd) throws HiveException { if(!(nd instanceof Task)){ throw new SemanticException("Task Graph Walker only walks for Task Graph"); } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 9004d2f..1043312 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -235,7 +235,7 @@ public void createDatabase(Database db, boolean ifNotExist) throw e; } } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -290,7 +290,7 @@ public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownD } catch (NoSuchObjectException e) { throw e; } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -606,7 +606,7 @@ public void createTable(Table tbl, boolean ifNotExists) throws HiveException { throw new HiveException(e); } } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -788,7 +788,7 @@ public void createIndex(String tableName, String indexName, String indexHandlerC this.getMSC().createIndex(indexDesc, tt); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -815,7 +815,7 @@ public Index getIndex(String dbName, String baseTableName, try { return this.getMSC().getIndex(dbName, baseTableName, indexName); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -879,7 +879,7 @@ public void dropTable(String dbName, String tableName, boolean deleteData, throw new HiveException(e); } } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -1044,7 +1044,7 @@ public Table getTable(final String dbName, final String tableName, try { return getMSC().getTables(dbName, tablePattern); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -1064,7 +1064,7 @@ public Table getTable(final String dbName, final String tableName, try { return getMSC().getTables(database, tablePattern); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -1078,7 +1078,7 @@ public Table getTable(final String dbName, final String tableName, try { return getMSC().getAllDatabases(); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -1095,7 +1095,7 @@ public Table getTable(final String dbName, final String tableName, try { return getMSC().getDatabases(databasePattern); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -1104,7 +1104,7 @@ public boolean grantPrivileges(PrivilegeBag privileges) try { return getMSC().grant_privileges(privileges); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -1119,7 +1119,7 @@ public boolean revokePrivileges(PrivilegeBag privileges) try { return getMSC().revoke_privileges(privileges); } catch (Exception 
e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -1147,7 +1147,7 @@ public Database getDatabase(String dbName) throws HiveException { } catch (NoSuchObjectException e) { return null; } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -1250,15 +1250,9 @@ public void loadPartition(Path loadPath, String tableName, newCreatedTpart = newTPart.getTPartition(); } } - } catch (IOException e) { - LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); - } catch (MetaException e) { - LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); - } catch (InvalidOperationException e) { + } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -1497,7 +1491,7 @@ public Partition createPartition(Table tbl, Map partSpec) throws } } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); + throw HiveException.wrap(e); } return out; } @@ -1595,7 +1589,7 @@ public Partition getPartition(Table tbl, Map partSpec, tpart = null; } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); + throw HiveException.wrap(e); } try { if (forceCreate) { @@ -1634,7 +1628,7 @@ public Partition getPartition(Table tbl, Map partSpec, } } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); + throw HiveException.wrap(e); } return new Partition(tbl, tpart); } @@ -1668,7 +1662,7 @@ public boolean dropPartition(String db_name, String tbl_name, names = getMSC().listPartitionNames(dbName, tblName, max); } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); + throw HiveException.wrap(e); } return names; } @@ -1684,7 +1678,7 @@ public boolean dropPartition(String db_name, String tbl_name, names = getMSC().listPartitionNames(dbName, tblName, pvals, max); } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); + throw HiveException.wrap(e); } return names; } @@ -1705,7 +1699,7 @@ public boolean dropPartition(String db_name, String tbl_name, (short) -1, getUserName(), getGroupNames()); } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); + throw HiveException.wrap(e); } List parts = new ArrayList(tParts.size()); for (org.apache.hadoop.hive.metastore.api.Partition tpart : tParts) { @@ -1735,7 +1729,7 @@ public boolean dropPartition(String db_name, String tbl_name, tParts = getMSC().listPartitions(tbl.getDbName(), tbl.getTableName(), (short)-1); } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); + throw HiveException.wrap(e); } Set parts = new LinkedHashSet(tParts.size()); for (org.apache.hadoop.hive.metastore.api.Partition tpart : tParts) { @@ -1770,7 +1764,7 @@ public boolean dropPartition(String db_name, String tbl_name, partitions = getMSC().listPartitionsWithAuthInfo(tbl.getDbName(), tbl.getTableName(), partialPvals, limit, getUserName(), getGroupNames()); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } List qlPartitions = new ArrayList(); @@ -1871,7 +1865,7 @@ public boolean dropPartition(String db_name, String tbl_name, } } } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } return partitions; } @@ -1940,7 +1934,7 @@ public void validatePartitionNameCharacters(List 
partVals) throws HiveEx getMSC().validatePartitionNameCharacters(partVals); } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -1949,7 +1943,7 @@ public void createRole(String roleName, String ownerName) try { getMSC().create_role(new Role(roleName, -1, ownerName)); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -1957,7 +1951,7 @@ public void dropRole(String roleName) throws HiveException { try { getMSC().drop_role(roleName); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -1971,7 +1965,7 @@ public void dropRole(String roleName) throws HiveException { try { return getMSC().listRoleNames(); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -1979,7 +1973,7 @@ public void dropRole(String roleName) throws HiveException { try { return getMSC().list_roles(principalName, principalType); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -1990,7 +1984,7 @@ public boolean grantRole(String roleName, String userName, return getMSC().grant_role(roleName, userName, principalType, grantor, grantorType, grantOption); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -1999,7 +1993,7 @@ public boolean revokeRole(String roleName, String userName, try { return getMSC().revoke_role(roleName, userName, principalType); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -2008,7 +2002,7 @@ public boolean revokeRole(String roleName, String userName, try { return getMSC().list_roles(userName, principalType); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -2039,7 +2033,7 @@ public PrincipalPrivilegeSet get_privilege_set(HiveObjectType objectType, table_name, part_values, column_name); return getMSC().get_privilege_set(hiveObj, user_name, group_names); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -2064,7 +2058,7 @@ public PrincipalPrivilegeSet get_privilege_set(HiveObjectType objectType, partValues, columnName); return getMSC().list_privileges(principalName, principalType, hiveObj); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -2359,7 +2353,7 @@ public void exchangeTablePartitions(Map partitionSpecs, destinationTableName); } catch (Exception ex) { LOG.error(StringUtils.stringifyException(ex)); - throw new HiveException(ex); + throw HiveException.wrap(ex); } } @@ -2449,7 +2443,7 @@ private String getUserName() { indexes = getMSC().listIndexes(dbName, tblName, max); } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); + throw HiveException.wrap(e); } return indexes; } @@ -2459,7 +2453,7 @@ public boolean updateTableColumnStatistics(ColumnStatistics statsObj) throws Hiv return getMSC().updateTableColumnStatistics(statsObj); } catch (Exception e) { LOG.debug(StringUtils.stringifyException(e)); - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -2468,7 +2462,7 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) throws return getMSC().updatePartitionColumnStatistics(statsObj); } catch (Exception e) { LOG.debug(StringUtils.stringifyException(e)); - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -2478,7 +2472,7 @@ public ColumnStatistics 
getTableColumnStatistics(String dbName, String tableName return getMSC().getTableColumnStatistics(dbName, tableName, colName); } catch (Exception e) { LOG.debug(StringUtils.stringifyException(e)); - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -2489,7 +2483,7 @@ public ColumnStatistics getPartitionColumnStatistics(String dbName, String table return getMSC().getPartitionColumnStatistics(dbName, tableName, partName, colName); } catch (Exception e) { LOG.debug(StringUtils.stringifyException(e)); - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -2499,7 +2493,7 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri return getMSC().deleteTableColumnStatistics(dbName, tableName, colName); } catch(Exception e) { LOG.debug(StringUtils.stringifyException(e)); - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -2509,7 +2503,7 @@ public boolean deletePartitionColumnStatistics(String dbName, String tableName, return getMSC().deletePartitionColumnStatistics(dbName, tableName, partName, colName); } catch(Exception e) { LOG.debug(StringUtils.stringifyException(e)); - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -2536,7 +2530,7 @@ public String getDelegationToken(String owner, String renewer) return getMSC().getDelegationToken(owner, renewer); } catch(Exception e) { LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); + throw HiveException.wrap(e); } } @@ -2546,7 +2540,7 @@ public void cancelDelegationToken(String tokenStrForm) getMSC().cancelDelegationToken(tokenStrForm); } catch(Exception e) { LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); + throw HiveException.wrap(e); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java index 1d895ca..6edc770 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java @@ -66,4 +66,8 @@ public HiveException(Throwable cause, ErrorMsg errorMsg, String... msgArgs) { public ErrorMsg getCanonicalErrorMsg() { return canonicalErrorMsg; } + + public static HiveException wrap(Throwable t) { + return t instanceof HiveException ? 
(HiveException)t : new HiveException(t); + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java index 143c0a6..ea3f25c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java @@ -350,7 +350,7 @@ public static HiveAuthorizationProvider getAuthorizeProviderManager( ret = ReflectionUtils.newInstance(cls, conf); } } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } ret.setAuthenticator(authenticator); return ret; @@ -376,7 +376,7 @@ public static HiveAuthenticationProvider getAuthenticator( ret = ReflectionUtils.newInstance(cls, conf); } } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } return ret; } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java index 9b0d482..c7a42b5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java @@ -87,7 +87,7 @@ public void error(OutputStream out, String errorMessage, int errorCode, String s } out.write(terminator); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } /** diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java index 2850c7f..7d88f1e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java @@ -73,10 +73,10 @@ public AbstractBucketJoinProc() { @Override abstract public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException; + Object... 
nodeOutputs) throws HiveException; private static List getBucketFilePathsOfPartition( - Path location, ParseContext pGraphContext) throws SemanticException { + Path location, ParseContext pGraphContext) throws HiveException { List fileNames = new ArrayList(); try { FileSystem fs = location.getFileSystem(pGraphContext.getConf()); @@ -87,7 +87,7 @@ abstract public Object process(Node nd, Stack stack, NodeProcessorCtx proc } } } catch (IOException e) { - throw new SemanticException(e); + throw new HiveException(e); } return fileNames; } @@ -132,7 +132,7 @@ private boolean checkNumberOfBucketsAgainstBigTable( protected boolean canConvertMapJoinToBucketMapJoin( MapJoinOperator mapJoinOp, ParseContext pGraphContext, - BucketJoinProcCtx context) throws SemanticException { + BucketJoinProcCtx context) throws HiveException { QBJoinTree joinCtx = this.pGraphContext.getMapJoinContext().get(mapJoinOp); if (joinCtx == null) { @@ -194,7 +194,7 @@ protected boolean checkConvertBucketMapJoin( QBJoinTree joinCtx, Map> keysMap, String baseBigAlias, - List joinAliases) throws SemanticException { + List joinAliases) throws HiveException { LinkedHashMap> tblAliasToNumberOfBucketsInEachPartition = new LinkedHashMap>(); @@ -273,7 +273,7 @@ protected boolean checkConvertBucketMapJoin( // Has to use full name to make sure it does not conflict with // org.apache.commons.lang.StringUtils LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); - throw new SemanticException(e.getMessage(), e); + throw e; } List partitions = prunedParts.getNotDeniedPartns(); // construct a mapping of (Partition->bucket file names) and (Partition -> bucket number) @@ -372,7 +372,7 @@ protected boolean checkConvertBucketMapJoin( */ protected void convertMapJoinToBucketMapJoin( MapJoinOperator mapJoinOp, - BucketJoinProcCtx context) throws SemanticException { + BucketJoinProcCtx context) throws HiveException { MapJoinDesc desc = mapJoinOp.getConf(); Map>> aliasBucketFileNameMapping = diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java index 0b7b1a3..01ffd4b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java @@ -74,7 +74,7 @@ public AbstractSMBJoinProc() { @Override abstract public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException; + Object... nodeOutputs) throws HiveException; /* * Return true or false based on whether a bucketed mapjoin can be converted successfully to @@ -86,7 +86,7 @@ abstract public Object process(Node nd, Stack stack, NodeProcessorCtx proc protected boolean canConvertBucketMapJoinToSMBJoin(MapJoinOperator mapJoinOp, Stack stack, SortBucketJoinProcCtx smbJoinContext, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { // Check whether the mapjoin is a bucketed mapjoin. // The above can be ascertained by checking the big table bucket -> small table buckets @@ -239,7 +239,7 @@ protected SMBMapJoinOperator convertBucketMapJoinToSMBJoin(MapJoinOperator mapJo * @param sortColumnsFirstTable The names and order of the sorted columns for the first table. * It is not initialized when pos = 0. 
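// Illustrative sketch (not part of the patch hunks around it): the HiveException.wrap(Throwable)
// helper added to HiveException.java earlier in this patch returns an existing HiveException
// unchanged and wraps anything else exactly once, so call sites in Hive.java, HiveUtils.java and
// TextMetaDataFormatter.java no longer produce HiveException(HiveException(...)) chains.
// The class and method names below (WrapSketch, fetchColumnStatistics) are hypothetical; the
// nested HiveException is a simplified stand-in, not the Hive class.
class WrapSketch {

    static class HiveException extends Exception {
        HiveException(String msg) { super(msg); }
        HiveException(Throwable cause) { super(cause); }

        // Already a HiveException? return it as-is; otherwise wrap it exactly once.
        static HiveException wrap(Throwable t) {
            return t instanceof HiveException ? (HiveException) t : new HiveException(t);
        }
    }

    // Hypothetical call site showing the pattern of this patch:
    // "throw new HiveException(e)" becomes "throw HiveException.wrap(e)".
    static void fetchColumnStatistics() throws HiveException {
        try {
            throw new java.io.IOException("simulated metastore failure");
        } catch (Exception e) {
            throw HiveException.wrap(e); // an existing HiveException would pass through untouched
        }
    }

    public static void main(String[] args) {
        try {
            fetchColumnStatistics();
        } catch (HiveException e) {
            System.out.println("cause: " + e.getCause()); // the original IOException, wrapped once
        }
    }
}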
* @return - * @throws SemanticException + * @throws HiveException */ private boolean isEligibleForBucketSortMergeJoin( SortBucketJoinProcCtx smbJoinContext, @@ -248,7 +248,7 @@ private boolean isEligibleForBucketSortMergeJoin( QBJoinTree joinTree, String[] aliases, int pos, - List sortColumnsFirstTable) throws SemanticException { + List sortColumnsFirstTable) throws HiveException { String alias = aliases[pos]; Map topToTable = this.pGraphContext .getTopToTable(); @@ -321,7 +321,7 @@ private boolean isEligibleForBucketSortMergeJoin( prunedParts = pGraphContext.getPrunedPartitions(alias, tso); } catch (HiveException e) { LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); - throw new SemanticException(e.getMessage(), e); + throw e; } List partitions = prunedParts.getNotDeniedPartns(); // Populate the names and order of columns for the first partition of the @@ -392,7 +392,7 @@ private boolean checkSortColsAndJoinCols(List sortCols, protected boolean checkConvertJoinToSMBJoin( JoinOperator joinOperator, SortBucketJoinProcCtx smbJoinContext, - ParseContext pGraphContext) throws SemanticException { + ParseContext pGraphContext) throws HiveException { QBJoinTree joinCtx = pGraphContext.getJoinContext().get(joinOperator); @@ -427,7 +427,7 @@ protected boolean checkConvertJoinToSMBJoin( protected boolean canConvertJoinToSMBJoin( JoinOperator joinOperator, SortBucketJoinProcCtx smbJoinContext, - ParseContext pGraphContext) throws SemanticException { + ParseContext pGraphContext) throws HiveException { boolean canConvert = canConvertJoinToBucketMapJoin( joinOperator, @@ -446,7 +446,7 @@ protected boolean canConvertJoinToSMBJoin( protected boolean canConvertJoinToBucketMapJoin( JoinOperator joinOp, ParseContext pGraphContext, - SortBucketJoinProcCtx context) throws SemanticException { + SortBucketJoinProcCtx context) throws HiveException { // This has already been inspected and rejected if (context.getRejectedJoinOps().contains(joinOp)) { @@ -465,7 +465,7 @@ protected boolean canConvertJoinToBucketMapJoin( (Class.forName(HiveConf.getVar(pGraphContext.getConf(), HiveConf.ConfVars.HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR))); } catch (ClassNotFoundException e) { - throw new SemanticException(e.getMessage()); + throw new HiveException(e); } BigTableSelectorForAutoSMJ bigTableMatcher = @@ -525,7 +525,7 @@ protected boolean canConvertJoinToBucketMapJoin( protected MapJoinOperator convertJoinToBucketMapJoin( JoinOperator joinOp, SortBucketJoinProcCtx joinContext, - ParseContext parseContext) throws SemanticException { + ParseContext parseContext) throws HiveException { MapJoinOperator mapJoinOp = MapJoinProcessor.convertMapJoin( parseContext.getConf(), parseContext.getOpParseCtx(), @@ -545,7 +545,7 @@ protected MapJoinOperator convertJoinToBucketMapJoin( protected void convertJoinToSMBJoin( JoinOperator joinOp, SortBucketJoinProcCtx smbJoinContext, - ParseContext parseContext) throws SemanticException { + ParseContext parseContext) throws HiveException { MapJoinOperator mapJoinOp = convertJoinToBucketMapJoin(joinOp, smbJoinContext, parseContext); SMBMapJoinOperator smbMapJoinOp = convertBucketMapJoinToSMBJoin(mapJoinOp, smbJoinContext, parseContext); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.java index 8719576..14e71f5 100644 --- 
ql/src/java/org/apache/hadoop/hive/ql/optimizer/AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; -import org.apache.hadoop.hive.ql.parse.SemanticException; /* * This is a pluggable policy to choose the candidate map-join table for converting a join to a @@ -48,63 +47,59 @@ public int getBigTablePosition(ParseContext parseCtx, JoinOperator joinOp, Set bigTableCandidates) - throws SemanticException { + throws HiveException { int bigTablePos = -1; long maxSize = -1; int numPartitionsCurrentBigTable = 0; // number of partitions for the chosen big table HiveConf conf = parseCtx.getConf(); - try { - List topOps = new ArrayList(); - getListTopOps(joinOp, topOps); - int currentPos = 0; - for (TableScanOperator topOp : topOps) { + List topOps = new ArrayList(); + getListTopOps(joinOp, topOps); + int currentPos = 0; + for (TableScanOperator topOp : topOps) { - if (topOp == null) { - return -1; - } + if (topOp == null) { + return -1; + } - if (!bigTableCandidates.contains(currentPos)) { - currentPos++; - continue; - } + if (!bigTableCandidates.contains(currentPos)) { + currentPos++; + continue; + } - int numPartitions = 1; // in case the sizes match, preference is - // given to the table with fewer partitions - Table table = parseCtx.getTopToTable().get(topOp); - long averageSize = 0; + int numPartitions = 1; // in case the sizes match, preference is + // given to the table with fewer partitions + Table table = parseCtx.getTopToTable().get(topOp); - if (!table.isPartitioned()) { - averageSize = getSize(conf, table); - } - else { - // For partitioned tables, get the size of all the partitions - PrunedPartitionList partsList = PartitionPruner.prune(topOp, parseCtx, null); - numPartitions = partsList.getNotDeniedPartns().size(); - long totalSize = 0; - for (Partition part : partsList.getNotDeniedPartns()) { - totalSize += getSize(conf, part); - } - averageSize = numPartitions == 0 ? 0 : totalSize/numPartitions; + long averageSize; + if (!table.isPartitioned()) { + averageSize = getSize(conf, table); + } + else { + // For partitioned tables, get the size of all the partitions + PrunedPartitionList partsList = PartitionPruner.prune(topOp, parseCtx, null); + numPartitions = partsList.getNotDeniedPartns().size(); + long totalSize = 0; + for (Partition part : partsList.getNotDeniedPartns()) { + totalSize += getSize(conf, part); } + averageSize = numPartitions == 0 ? 
0 : totalSize/numPartitions; + } - if (averageSize > maxSize) { - maxSize = averageSize; + if (averageSize > maxSize) { + maxSize = averageSize; + bigTablePos = currentPos; + numPartitionsCurrentBigTable = numPartitions; + } + // If the sizes match, prefer the table with fewer partitions + else if (averageSize == maxSize) { + if (numPartitions < numPartitionsCurrentBigTable) { bigTablePos = currentPos; numPartitionsCurrentBigTable = numPartitions; } - // If the sizes match, prefer the table with fewer partitions - else if (averageSize == maxSize) { - if (numPartitions < numPartitionsCurrentBigTable) { - bigTablePos = currentPos; - numPartitionsCurrentBigTable = numPartitions; - } - } - - currentPos++; } - } catch (HiveException e) { - throw new SemanticException(e.getMessage()); + + currentPos++; } return bigTablePos; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/BigTableSelectorForAutoSMJ.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/BigTableSelectorForAutoSMJ.java index c411bf5..ca6cc1d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/BigTableSelectorForAutoSMJ.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/BigTableSelectorForAutoSMJ.java @@ -21,8 +21,8 @@ import java.util.Set; import org.apache.hadoop.hive.ql.exec.JoinOperator; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; /* * This is a plug-able policy to chose the candidate map-join table for converting a join to a @@ -32,5 +32,5 @@ public interface BigTableSelectorForAutoSMJ { public int getBigTablePosition(ParseContext parseContext, JoinOperator joinOp, Set joinCandidates) - throws SemanticException; + throws HiveException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java index 6f35b87..88d8e6a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java @@ -35,8 +35,8 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; /** * this transformation does bucket map join optimization. @@ -50,7 +50,7 @@ public BucketMapJoinOptimizer() { } @Override - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { Map opRules = new LinkedHashMap(); BucketJoinProcCtx bucketMapJoinOptimizeCtx = @@ -84,7 +84,7 @@ private NodeProcessor getDefaultProc() { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... 
nodeOutputs) - throws SemanticException { + throws HiveException { return null; } }; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java index 1260c83..2eb5223 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -36,7 +37,7 @@ public BucketMapjoinProc(ParseContext pGraphContext) { @Override @SuppressWarnings("unchecked") public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { BucketJoinProcCtx context = (BucketJoinProcCtx) procCtx; MapJoinOperator mapJoinOperator = (MapJoinOperator) nd; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java index 6caabdc..bc1eacd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java @@ -46,11 +46,11 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.OperatorDesc; @@ -76,7 +76,7 @@ public BucketingSortingReduceSinkOptimizer() { } @Override - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { Map opRules = new LinkedHashMap(); @@ -103,7 +103,7 @@ private NodeProcessor getDefaultProc() { return new NodeProcessor() { @Override public Object process(Node nd, Stack stack, - NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException { + NodeProcessorCtx procCtx, Object... nodeOutputs) throws HiveException { return null; } }; @@ -357,7 +357,7 @@ else if (tableTag != columnTableMappings[colNumber]) { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { // If the reduce sink has not been introduced due to bucketing/sorting, ignore it FileSinkOperator fsOp = (FileSinkOperator) nd; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java index 58a9b59..1499e7a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java @@ -44,9 +44,9 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.OpParseContext; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.OperatorDesc; /** @@ -76,7 +76,7 @@ public ColumnPruner() { * @param pactx * the current parse context */ - public ParseContext transform(ParseContext pactx) throws SemanticException { + public ParseContext transform(ParseContext pactx) throws HiveException { pGraphContext = pactx; opToParseCtxMap = pGraphContext.getOpParseCtx(); @@ -144,7 +144,7 @@ public ColumnPrunerWalker(Dispatcher disp) { * Walk the given operator. */ @Override - public void walk(Node nd) throws SemanticException { + public void walk(Node nd) throws HiveException { boolean walkChildren = true; opStack.push(nd); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcCtx.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcCtx.java index db36151..9c560cc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcCtx.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcCtx.java @@ -28,9 +28,9 @@ import org.apache.hadoop.hive.ql.exec.SelectOperator; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.OpParseContext; import org.apache.hadoop.hive.ql.parse.RowResolver; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.SelectDesc; @@ -81,10 +81,10 @@ public ColumnPrunerProcCtx( * @param curOp * The root of the operator subtree. * @return List of the internal column names. 
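// Sketch of the signature migration applied throughout ColumnPruner and the
// ColumnPrunerProcFactory processors below: process(...) and transform(...) now declare
// HiveException instead of SemanticException, so metadata failures can propagate without being
// re-wrapped at every level. The Node, NodeProcessorCtx and NodeProcessor types here are
// simplified stand-ins for the org.apache.hadoop.hive.ql.lib interfaces as they would look
// after this patch; ProcessorSketch and DefaultProcSketch are hypothetical names.
import java.util.Stack;

class ProcessorSketch {

    static class HiveException extends Exception {
        HiveException(String msg) { super(msg); }
    }

    interface Node { }
    interface NodeProcessorCtx { }

    // Patched shape of the dispatcher callback: HiveException instead of SemanticException.
    interface NodeProcessor {
        Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
            Object... nodeOutputs) throws HiveException;
    }

    // A default processor in the style of ColumnPrunerDefaultProc: it may throw HiveException
    // directly rather than converting to SemanticException first.
    static class DefaultProcSketch implements NodeProcessor {
        @Override
        public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
            Object... nodeOutputs) throws HiveException {
            if (nd == null) {
                throw new HiveException("null operator node");
            }
            return null; // nothing to prune in this sketch
        }
    }

    public static void main(String[] args) throws HiveException {
        NodeProcessor proc = new DefaultProcSketch();
        Object result = proc.process(new Node() { }, new Stack<Node>(), new NodeProcessorCtx() { });
        System.out.println("processor returned: " + result);
    }
}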
- * @throws SemanticException + * @throws HiveException */ public List genColLists(Operator curOp) - throws SemanticException { + throws HiveException { List colList = new ArrayList(); if (curOp.getChildOperators() != null) { for (Operator child : curOp.getChildOperators()) { @@ -161,7 +161,7 @@ public ColumnPrunerProcCtx( * Create the list of internal columns for select tag of LV */ public List getSelectColsFromLVJoin(RowResolver rr, - List colList) throws SemanticException { + List colList) throws HiveException { List columns = new ArrayList(); for (String col : colList) { if (rr.reverseLookup(col) != null) { diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java index 0798470..55cd85d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java @@ -54,10 +54,10 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.VirtualColumn; import org.apache.hadoop.hive.ql.parse.OpParseContext; import org.apache.hadoop.hive.ql.parse.RowResolver; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.AggregationDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; @@ -92,7 +92,7 @@ private ColumnPrunerProcFactory() { */ public static class ColumnPrunerFilterProc implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { FilterOperator op = (FilterOperator) nd; ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx; ExprNodeDesc condn = op.getConf().getPredicate(); @@ -125,7 +125,7 @@ public static ColumnPrunerFilterProc getFilterProc() { */ public static class ColumnPrunerGroupByProc implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { GroupByOperator op = (GroupByOperator) nd; ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx; List colLists = new ArrayList(); @@ -167,7 +167,7 @@ public static ColumnPrunerGroupByProc getGroupByProc() { */ public static class ColumnPrunerPTFProc implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { PTFOperator op = (PTFOperator) nd; PTFDesc conf = op.getConf(); @@ -194,7 +194,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, } private static RowResolver buildPrunedRR(List prunedCols, - RowResolver oldRR, ArrayList sig) throws SemanticException{ + RowResolver oldRR, ArrayList sig) throws HiveException{ RowResolver newRR = new RowResolver(); HashSet prunedColsSet = new HashSet(prunedCols); for(ColumnInfo cInfo : oldRR.getRowSchema().getSignature()) { @@ -270,7 +270,7 @@ public static ColumnPrunerPTFProc getPTFProc() { */ public static class ColumnPrunerDefaultProc implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx; cppCtx.getPrunedColLists().put((Operator) nd, cppCtx.genColLists((Operator) nd)); @@ -294,7 +294,7 @@ public static ColumnPrunerDefaultProc getDefaultProc() { */ public static class ColumnPrunerTableScanProc implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { TableScanOperator scanOp = (TableScanOperator) nd; ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx; List cols = cppCtx @@ -360,7 +360,7 @@ public static ColumnPrunerTableScanProc getTableScanProc() { */ public static class ColumnPrunerReduceSinkProc implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { ReduceSinkOperator op = (ReduceSinkOperator) nd; ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx; HashMap, OpParseContext> opToParseCtxMap = cppCtx @@ -468,7 +468,7 @@ public static ColumnPrunerReduceSinkProc getReduceSinkProc() { */ public static class ColumnPrunerLateralViewJoinProc implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { LateralViewJoinOperator op = (LateralViewJoinOperator) nd; ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx; List cols = cppCtx.genColLists(op); @@ -517,7 +517,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, public static class ColumnPrunerLateralViewForwardProc extends ColumnPrunerDefaultProc { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { super.process(nd, stack, ctx, nodeOutputs); LateralViewForwardOperator op = (LateralViewForwardOperator) nd; ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx; @@ -554,7 +554,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, */ public static class ColumnPrunerSelectProc implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { SelectOperator op = (SelectOperator) nd; ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx; @@ -641,10 +641,10 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, * * @param op * @param retainedSelOutputCols - * @throws SemanticException + * @throws HiveException */ private void handleChildren(SelectOperator op, - List retainedSelOutputCols, ColumnPrunerProcCtx cppCtx) throws SemanticException { + List retainedSelOutputCols, ColumnPrunerProcCtx cppCtx) throws HiveException { for (Operator child : op.getChildOperators()) { if (child instanceof ReduceSinkOperator) { boolean[] flags = getPruneReduceSinkOpRetainFlags( @@ -690,7 +690,7 @@ private void handleChildren(SelectOperator op, } private static void pruneReduceSinkOperator(boolean[] retainFlags, - ReduceSinkOperator reduce, ColumnPrunerProcCtx cppCtx) throws SemanticException { + ReduceSinkOperator reduce, ColumnPrunerProcCtx cppCtx) throws HiveException { ReduceSinkDesc reduceConf = reduce.getConf(); Map oldMap = reduce.getColumnExprMap(); LOG.info("RS " + reduce.getIdentifier() + " oldColExprMap: " + oldMap); @@ -760,7 +760,7 @@ public static ColumnPrunerLateralViewForwardProc getLateralViewForwardProc() { */ public static class ColumnPrunerJoinProc implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { JoinOperator op = (JoinOperator) nd; pruneJoinOperator(ctx, op, op.getConf(), op.getColumnExprMap(), null, false); @@ -782,7 +782,7 @@ public static ColumnPrunerJoinProc getJoinProc() { */ public static class ColumnPrunerMapJoinProc implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { MapJoinOperator op = (MapJoinOperator) nd; pruneJoinOperator(ctx, op, op.getConf(), op.getColumnExprMap(), op .getConf().getRetainList(), true); @@ -793,7 +793,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, private static void pruneOperator(NodeProcessorCtx ctx, Operator op, List cols) - throws SemanticException { + throws HiveException { // the pruning needs to preserve the order of columns in the input schema RowSchema inputSchema = op.getSchema(); if (inputSchema != null) { @@ -813,11 +813,11 @@ private static void pruneOperator(NodeProcessorCtx ctx, * @param op * @param cols * @return - * @throws SemanticException + * @throws HiveException */ private static List preserveColumnOrder(Operator op, List cols) - throws SemanticException { + throws HiveException { RowSchema inputSchema = op.getSchema(); if (inputSchema != null) { ArrayList rs = new ArrayList(); @@ -837,7 +837,7 @@ private static void pruneOperator(NodeProcessorCtx ctx, private static void pruneJoinOperator(NodeProcessorCtx ctx, CommonJoinOperator op, JoinDesc conf, Map columnExprMap, - Map> retainMap, boolean mapJoin) throws SemanticException { + Map> retainMap, boolean mapJoin) throws HiveException { ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx; Map> prunedColLists = new HashMap>(); List> childOperators = op diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java index 384342a..bf2d0c2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.OptimizeTezProcContext; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.Statistics; @@ -58,7 +57,7 @@ */ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... nodeOutputs) - throws SemanticException { + throws HiveException { OptimizeTezProcContext context = (OptimizeTezProcContext) procCtx; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java index 978c18d..ea07576 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java @@ -36,8 +36,8 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.FileSinkDesc; import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; @@ -61,7 +61,7 @@ public GenMRFileSink1() { * context */ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { GenMRProcContext ctx = (GenMRProcContext) opProcCtx; ParseContext parseCtx = ctx.getParseCtx(); boolean chDir = false; @@ -130,7 +130,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, * Use the task created by the first linked file descriptor */ private void processLinkedFileDesc(GenMRProcContext ctx, - Task childTask) throws SemanticException { + Task childTask) throws HiveException { Task currTask = ctx.getCurrTask(); Operator currTopOp = ctx.getCurrTopOp(); if (currTopOp != null && !ctx.isSeenOp(currTask, currTopOp)) { @@ -155,10 +155,10 @@ private void processLinkedFileDesc(GenMRProcContext ctx, * whether the operator should be first output to a tmp dir and then merged * to the final dir later * @return the final file name to which the FileSinkOperator should store. - * @throws SemanticException + * @throws HiveException */ private Path processFS(FileSinkOperator fsOp, Stack stack, - NodeProcessorCtx opProcCtx, boolean chDir) throws SemanticException { + NodeProcessorCtx opProcCtx, boolean chDir) throws HiveException { GenMRProcContext ctx = (GenMRProcContext) opProcCtx; Task currTask = ctx.getCurrTask(); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMROperator.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMROperator.java index 4f3eb06..86bb056 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMROperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMROperator.java @@ -25,8 +25,8 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.OperatorDesc; /** @@ -46,7 +46,7 @@ public GenMROperator() { * context */ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { GenMRProcContext ctx = (GenMRProcContext) procCtx; Map, GenMapRedCtx> mapCurrCtx = ctx diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java index bc8fed4..94849ed 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java @@ -28,8 +28,8 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; @@ -54,7 +54,7 @@ public GenMRRedSink1() { * context */ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { ReduceSinkOperator op = (ReduceSinkOperator) nd; GenMRProcContext ctx = (GenMRProcContext) opProcCtx; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java index 6c34bfe..75b80c8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java @@ -28,8 +28,8 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.OperatorDesc; /** @@ -49,7 +49,7 @@ public GenMRRedSink2() { * context */ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { ReduceSinkOperator op = (ReduceSinkOperator) nd; GenMRProcContext ctx = (GenMRProcContext) opProcCtx; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java index e4f6292..65c36cf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java @@ -31,8 +31,8 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Utils; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; @@ -53,7 +53,7 @@ public GenMRRedSink3() { * context */ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { ReduceSinkOperator op = (ReduceSinkOperator) nd; GenMRProcContext ctx = (GenMRProcContext) opProcCtx; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java index 9c80714..6c8ef62 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx; @@ -68,7 +69,7 @@ public GenMRTableScan1() { * context */ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { TableScanOperator op = (TableScanOperator) nd; GenMRProcContext ctx = (GenMRProcContext) opProcCtx; ParseContext parseCtx = ctx.getParseCtx(); @@ -167,12 +168,12 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, * @param parseInfo * @param statsWork * @param statsTask - * @throws SemanticException + * @throws HiveException */ private void handlePartialScanCommand(TableScanOperator op, GenMRProcContext ctx, ParseContext parseCtx, Task currTask, QBParseInfo parseInfo, StatsWork statsWork, - Task statsTask) throws SemanticException { + Task statsTask) throws HiveException { String aggregationKey = op.getConf().getStatsAggPrefix(); List inputPaths = new ArrayList(); switch (parseInfo.getTableSpec().specType) { diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java index c580818..ae6a811 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java @@ -31,13 +31,13 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMRUnionCtx; import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx; import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext; import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext.UnionParseContext; import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcFactory; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; @@ -55,10 +55,10 @@ public GenMRUnion1() { * Process the union if all sub-queries are map-only * * @return - * @throws SemanticException + * @throws HiveException */ private Object processMapOnlyUnion(UnionOperator union, Stack stack, - GenMRProcContext ctx, UnionProcContext uCtx) throws SemanticException { + GenMRProcContext ctx, UnionProcContext uCtx) throws HiveException { // merge currTask from multiple topOps GenMRUnionCtx uCtxTask = ctx.getUnionTask(union); @@ -160,11 +160,11 @@ private void processSubQueryUnionCreateIntermediate( * @param uCtxTask * @param union * @param stack - * @throws SemanticException + * @throws HiveException */ private void processSubQueryUnionMerge(GenMRProcContext ctx, GenMRUnionCtx uCtxTask, UnionOperator union, Stack stack) - throws SemanticException { + throws HiveException { // The current plan can be thrown away after being merged with the union // plan Task uTask = uCtxTask.getUTask(); @@ -189,7 +189,7 @@ private void processSubQueryUnionMerge(GenMRProcContext ctx, * context */ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { UnionOperator union = (UnionOperator) nd; GenMRProcContext ctx = (GenMRProcContext) opProcCtx; ParseContext parseCtx = ctx.getParseCtx(); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index ac9df5e..3d5b9cf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -121,7 +121,7 @@ public static boolean needsTagging(ReduceWork rWork) { * processing context */ public static void initPlan(ReduceSinkOperator op, GenMRProcContext opProcCtx) - throws SemanticException { + throws HiveException { Operator reducer = op.getChildOperators().get(0); Map, GenMapRedCtx> mapCurrCtx = opProcCtx.getMapCurrCtx(); @@ -169,7 +169,7 @@ public static void initPlan(ReduceSinkOperator op, GenMRProcContext opProcCtx) */ public static void initUnionPlan(ReduceSinkOperator op, UnionOperator currUnionOp, GenMRProcContext opProcCtx, - Task unionTask) throws SemanticException { + Task unionTask) throws HiveException { Operator reducer = op.getChildOperators().get(0); MapredWork plan = (MapredWork) unionTask.getWork(); @@ -194,7 +194,7 @@ public static void initUnionPlan(ReduceSinkOperator op, UnionOperator currUnionO private static void setUnionPlan(GenMRProcContext opProcCtx, boolean local, Task currTask, GenMRUnionCtx uCtx, - boolean mergeTask) throws SemanticException { + boolean mergeTask) throws HiveException { Operator currTopOp = opProcCtx.getCurrTopOp(); if (currTopOp != null) { @@ -240,7 +240,7 @@ private static void setUnionPlan(GenMRProcContext opProcCtx, */ public static void initUnionPlan(GenMRProcContext opProcCtx, UnionOperator currUnionOp, Task currTask, boolean local) - throws SemanticException { + throws HiveException { // In case of lateral views followed by a join, the same tree // can be traversed more than one if (currUnionOp != null) { @@ -257,7 +257,7 @@ public static void joinUnionPlan(GenMRProcContext opProcCtx, UnionOperator currUnionOp, Task currentUnionTask, Task existingTask, boolean local) - throws SemanticException { + throws HiveException { assert currUnionOp != null; GenMRUnionCtx uCtx = opProcCtx.getUnionTask(currUnionOp); assert uCtx != null; @@ -308,7 +308,7 @@ public static void joinUnionPlan(GenMRProcContext opProcCtx, */ public static void joinPlan(Task currTask, Task oldTask, GenMRProcContext opProcCtx) - throws SemanticException { + throws HiveException { assert currTask != null && oldTask != null; Operator currTopOp = opProcCtx.getCurrTopOp(); @@ -349,7 +349,7 @@ public static void joinPlan(Task currTask, */ static boolean mergeInput(Operator currTopOp, GenMRProcContext opProcCtx, Task task, boolean local) - throws SemanticException { + throws HiveException { if (!opProcCtx.isSeenOp(task, currTopOp)) { String currAliasId = opProcCtx.getCurrAliasId(); setTaskPlan(currAliasId, currTopOp, task, local, opProcCtx); @@ -364,7 +364,7 @@ static boolean mergeInput(Operator currTopOp, */ static void splitPlan(ReduceSinkOperator cRS, Task parentTask, Task childTask, - GenMRProcContext opProcCtx) throws SemanticException { + GenMRProcContext opProcCtx) throws HiveException { assert parentTask != null && childTask != null; splitTasks(cRS, parentTask, childTask, opProcCtx); } @@ -379,7 +379,7 @@ static void splitPlan(ReduceSinkOperator cRS, * processing context */ static void splitPlan(ReduceSinkOperator cRS, GenMRProcContext opProcCtx) - throws 
SemanticException { + throws HiveException { // Generate a new task ParseContext parseCtx = opProcCtx.getParseCtx(); Task parentTask = opProcCtx.getCurrTask(); @@ -417,7 +417,7 @@ static void splitPlan(ReduceSinkOperator cRS, GenMRProcContext opProcCtx) */ public static void setTaskPlan(String alias_id, Operator topOp, Task task, boolean local, - GenMRProcContext opProcCtx) throws SemanticException { + GenMRProcContext opProcCtx) throws HiveException { setTaskPlan(alias_id, topOp, task, local, opProcCtx, null); } @@ -461,7 +461,7 @@ private static ReadEntity getParentViewInfo(String alias_id, */ public static void setTaskPlan(String alias_id, Operator topOp, Task task, boolean local, - GenMRProcContext opProcCtx, PrunedPartitionList pList) throws SemanticException { + GenMRProcContext opProcCtx, PrunedPartitionList pList) throws HiveException { setMapWork(((MapredWork) task.getWork()).getMapWork(), opProcCtx.getParseCtx(), opProcCtx.getInputs(), pList, topOp, alias_id, opProcCtx.getConf(), local); opProcCtx.addSeenOp(task, topOp); @@ -487,7 +487,7 @@ public static void setTaskPlan(String alias_id, */ public static void setMapWork(MapWork plan, ParseContext parseCtx, Set inputs, PrunedPartitionList partsList, Operator topOp, String alias_id, - HiveConf conf, boolean local) throws SemanticException { + HiveConf conf, boolean local) throws HiveException { ArrayList partDir = new ArrayList(); ArrayList partDesc = new ArrayList(); @@ -500,11 +500,9 @@ public static void setMapWork(MapWork plan, ParseContext parseCtx, Set topOp, MapWork plan, boolean local, - TableDesc tt_desc) throws SemanticException { + TableDesc tt_desc) throws HiveException { if (path == null || alias == null) { return; @@ -990,7 +988,7 @@ protected static TableScanOperator createTemporaryFile( **/ private static void splitTasks(ReduceSinkOperator op, Task parentTask, Task childTask, - GenMRProcContext opProcCtx) throws SemanticException { + GenMRProcContext opProcCtx) throws HiveException { if (op.getNumParent() != 1) { throw new IllegalStateException("Expecting operator " + op + " to have one parent. " + "But found multiple parents : " + op.getParentOperators()); @@ -1150,7 +1148,7 @@ public static void replaceMapWork(String sourceAlias, String targetAlias, * @param mvTasks * @param conf * @param currTask - * @throws SemanticException + * @throws HiveException * create a Map-only merge job using CombineHiveInputFormat for all partitions with * following operators: @@ -1181,7 +1179,7 @@ public static void replaceMapWork(String sourceAlias, String targetAlias, public static void createMRWorkForMergingFiles (FileSinkOperator fsInput, Path finalName, DependencyCollectionTask dependencyTask, List> mvTasks, HiveConf conf, - Task currTask) throws SemanticException { + Task currTask) throws HiveException { // // 1. 
create the operator tree @@ -1258,7 +1256,7 @@ public static void createMRWorkForMergingFiles (FileSinkOperator fsInput, work = cplan; } catch (ClassNotFoundException e) { String msg = "Illegal input format class: " + inputFormatClass; - throw new SemanticException(msg); + throw new HiveException(msg); } } else { @@ -1481,7 +1479,7 @@ private static MapWork createMRWorkForMergingFiles (HiveConf conf, * null otherwise */ public static MapWork createRCFileMergeTask(FileSinkDesc fsInputDesc, - Path finalName, boolean hasDynamicPartitions) throws SemanticException { + Path finalName, boolean hasDynamicPartitions) throws HiveException { Path inputDir = fsInputDesc.getFinalDirName(); TableDesc tblDesc = fsInputDesc.getTableInfo(); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java index 603a261..afcffe7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; import org.apache.hadoop.hive.ql.parse.QB; import org.apache.hadoop.hive.ql.parse.QBParseInfo; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.SplitSample; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.OperatorDesc; @@ -56,7 +55,7 @@ private final Log LOG = LogFactory.getLog(GlobalLimitOptimizer.class.getName()); - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { Context ctx = pctx.getContext(); Map> topOps = pctx.getTopOps(); GlobalLimitCtx globalLimitCtx = pctx.getGlobalLimitCtx(); @@ -112,7 +111,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException { // Has to use full name to make sure it does not conflict with // org.apache.commons.lang.StringUtils LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); - throw new SemanticException(e.getMessage(), e); + throw e; } // If there is any unknown partition, create a map-reduce job for diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java index c16010f..3c19373 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java @@ -77,7 +77,7 @@ public GroupByOptimizer() { } @Override - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { Map opRules = new LinkedHashMap(); HiveConf conf = pctx.getConf(); @@ -119,7 +119,7 @@ private NodeProcessor getDefaultProc() { return new NodeProcessor() { @Override public Object process(Node nd, Stack stack, - NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException { + NodeProcessorCtx procCtx, Object... 
nodeOutputs) throws HiveException { return null; } }; @@ -170,7 +170,7 @@ protected boolean checkGroupByOperatorProcessed( protected void processGroupBy(GroupByOptimizerContext ctx, Stack stack, GroupByOperator groupByOp, - int depth) throws SemanticException { + int depth) throws HiveException { HiveConf hiveConf = ctx.getConf(); GroupByOptimizerSortMatch match = checkSortGroupBy(stack, groupByOp); boolean useMapperSort = @@ -257,7 +257,7 @@ else if (setBucketGroup) { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { // GBY,RS,GBY... (top to bottom) GroupByOperator groupByOp = (GroupByOperator) stack.get(stack.size() - 3); @@ -273,7 +273,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // the base table for the group by matches the skewed keys protected GroupByOptimizerSortMatch checkSortGroupBy(Stack stack, GroupByOperator groupByOp) - throws SemanticException { + throws HiveException { // if this is not a HASH groupby, return if (groupByOp.getConf().getMode() != GroupByDesc.Mode.HASH) { @@ -393,7 +393,7 @@ else if ((expr instanceof ExprNodeConstantDesc) || partsList = pGraphContext.getPrunedPartitions(table.getTableName(), tableScanOp); } catch (HiveException e) { LOG.error(StringUtils.stringifyException(e)); - throw new SemanticException(e.getMessage(), e); + throw e; } List notDeniedPartns = partsList.getNotDeniedPartns(); @@ -458,12 +458,12 @@ private ColumnOrderMatch matchColumnOrder(List cols1, List cols2 * @param bucketCols * @param sortCols * @return - * @throws SemanticException + * @throws HiveException */ private GroupByOptimizerSortMatch matchBucketSortCols( List groupByCols, List bucketCols, - List sortCols) throws SemanticException { + List sortCols) throws HiveException { /* * >> Super set of @@ -544,7 +544,7 @@ public SortGroupBySkewProcessor(ParseContext pGraphContext) { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { // GBY,RS,GBY,RS,GBY... (top to bottom) GroupByOperator groupByOp = (GroupByOperator) stack.get(stack.size() - 5); GroupByOptimizerContext ctx = (GroupByOptimizerContext) procCtx; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java index f1ef4ce..94ba497 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java @@ -224,7 +224,7 @@ private static boolean isIndexTableFresh(Hive hive, List indexes, Table s * Get a list of indexes on a table that match given types. 
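// Sketch of the rethrow pattern used in the partition-pruning catch blocks of this patch
// (AbstractBucketJoinProc, AbstractSMBJoinProc, GlobalLimitOptimizer, GroupByOptimizer):
// once the enclosing method declares HiveException, a caught HiveException is logged and
// rethrown as-is instead of being re-wrapped into a SemanticException, keeping the original
// type and stack trace. RethrowSketch, pruneOld, pruneNew and prunePartitions are hypothetical
// names; the exception classes are simplified stand-ins.
class RethrowSketch {

    static class HiveException extends Exception {
        HiveException(String msg) { super(msg); }
        HiveException(String msg, Throwable cause) { super(msg, cause); }
    }

    static class SemanticException extends HiveException {
        SemanticException(String msg, Throwable cause) { super(msg, cause); }
    }

    // Before the patch: the method could only declare SemanticException, so the caught
    // HiveException had to be re-wrapped, burying the original exception one level deeper.
    static void pruneOld() throws SemanticException {
        try {
            prunePartitions();
        } catch (HiveException e) {
            throw new SemanticException(e.getMessage(), e);
        }
    }

    // After the patch: the method declares HiveException, so the exception is rethrown directly
    // (the real code also logs it via LOG.error before the throw).
    static void pruneNew() throws HiveException {
        try {
            prunePartitions();
        } catch (HiveException e) {
            throw e;
        }
    }

    static void prunePartitions() throws HiveException {
        throw new HiveException("simulated pruning failure");
    }

    public static void main(String[] args) {
        try {
            pruneNew();
        } catch (HiveException e) {
            System.out.println("rethrown: " + e.getMessage());
        }
    }
}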
*/ public static List getIndexes(Table baseTableMetaData, List matchIndexTypes) - throws SemanticException { + throws HiveException { List matchingIndexes = new ArrayList(); List indexesOnTable = null; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/JoinReorder.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/JoinReorder.java index 9238e0e..ae64446 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/JoinReorder.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/JoinReorder.java @@ -25,9 +25,9 @@ import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; import org.apache.hadoop.hive.ql.exec.TableScanOperator; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.QBJoinTree; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.OperatorDesc; /** @@ -152,7 +152,7 @@ private void reorder(JoinOperator joinOp, Set bigTables) { * @param pactx * current parse context */ - public ParseContext transform(ParseContext pactx) throws SemanticException { + public ParseContext transform(ParseContext pactx) throws HiveException { Set bigTables = getBigTables(pactx); for (JoinOperator joinOp : pactx.getJoinContext().keySet()) { diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java index f80941e..aa9c27d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -86,7 +87,7 @@ */ public class LimitPushdownOptimizer implements Transform { - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { Map opRules = new LinkedHashMap(); opRules.put(new RuleRegExp("R1", ReduceSinkOperator.getOperatorName() + "%" + @@ -106,7 +107,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException { private static class TopNReducer implements NodeProcessor { public Object process(Node nd, Stack stack, - NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException { + NodeProcessorCtx procCtx, Object... 
nodeOutputs) throws HiveException { ReduceSinkOperator rs = null; for (int i = stack.size() - 2 ; i >= 0; i--) { Operator operator = (Operator) stack.get(i); @@ -137,7 +138,7 @@ public Object process(Node nd, Stack stack, private float threshold; - public LimitPushdownContext(HiveConf conf) throws SemanticException { + public LimitPushdownContext(HiveConf conf) throws HiveException { threshold = conf.getFloatVar(HiveConf.ConfVars.HIVELIMITPUSHDOWNMEMORYUSAGE); if (threshold <= 0 || threshold >= 1) { throw new SemanticException("Invalid memory usage value " + threshold + diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java index b5f939b..ce447d3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java @@ -30,8 +30,8 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.BucketMapJoinContext; import org.apache.hadoop.hive.ql.plan.MapJoinDesc; import org.apache.hadoop.hive.ql.plan.MapWork; @@ -147,7 +147,7 @@ public static void setupBucketMapJoinInfo(MapWork plan, private static void initMapJoinPlan(AbstractMapJoinOperator op, Task currTask, GenMRProcContext opProcCtx, boolean local) - throws SemanticException { + throws HiveException { // The map is overloaded to keep track of mapjoins also opProcCtx.getOpTaskMap().put(op, currTask); @@ -173,7 +173,7 @@ private static void initMapJoinPlan(AbstractMapJoinOperator op, Task oldTask, GenMRProcContext opProcCtx, boolean local) - throws SemanticException { + throws HiveException { Operator currTopOp = opProcCtx.getCurrTopOp(); GenMapRedUtils.mergeInput(currTopOp, opProcCtx, oldTask, local); } @@ -189,7 +189,7 @@ private static void joinMapJoinPlan(AbstractMapJoinOperator stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { AbstractMapJoinOperator mapJoin = (AbstractMapJoinOperator) nd; GenMRProcContext ctx = (GenMRProcContext) procCtx; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java index b4aeb14..79b94d3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.GenMapRedWalker; import org.apache.hadoop.hive.ql.parse.OpParseContext; import org.apache.hadoop.hive.ql.parse.ParseContext; @@ -121,10 +122,10 @@ public MapJoinProcessor() { * map-join operator for which local work needs to be generated. 
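// Note on why statements such as "throw new SemanticException(...)" in LimitPushdownContext
// above and checkParentOperatorType below still compile after their throws clauses change to
// HiveException: SemanticException extends HiveException in the Hive source, so the broader
// declaration covers the narrower throw. A minimal sketch of that relationship; HierarchySketch
// and checkThreshold are hypothetical names and the exception classes are stand-ins.
class HierarchySketch {

    static class HiveException extends Exception {
        HiveException(String msg) { super(msg); }
    }

    static class SemanticException extends HiveException {
        SemanticException(String msg) { super(msg); }
    }

    // Declares the broader HiveException, but still throws the more specific SemanticException
    // for genuine analysis-time errors such as an out-of-range config value.
    static void checkThreshold(float threshold) throws HiveException {
        if (threshold <= 0 || threshold >= 1) {
            throw new SemanticException("Invalid memory usage value " + threshold);
        }
    }

    public static void main(String[] args) {
        try {
            checkThreshold(1.5f);
        } catch (HiveException e) {
            // callers catching HiveException still see the SemanticException subtype
            System.out.println(e.getClass().getSimpleName() + ": " + e.getMessage());
        }
    }
}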
* @param bigTablePos * @return - * @throws SemanticException + * @throws HiveException */ private static String genMapJoinLocalWork(MapredWork newWork, MapJoinOperator mapJoinOp, - int bigTablePos) throws SemanticException { + int bigTablePos) throws HiveException { // keep the small table alias to avoid concurrent modification exception ArrayList smallTableAliasList = new ArrayList(); String bigTableAlias = null; @@ -234,11 +235,11 @@ private static String genMapJoinLocalWork(MapredWork newWork, MapJoinOperator ma * The join operator that needs to be converted to map-join * @param bigTablePos * @return the alias to the big table - * @throws SemanticException + * @throws HiveException */ public static String genMapJoinOpAndLocalWork(HiveConf conf, MapredWork newWork, JoinOperator op, int mapJoinPos) - throws SemanticException { + throws HiveException { LinkedHashMap, OpParseContext> opParseCtxMap = newWork.getMapWork().getOpParseCtxMap(); QBJoinTree newJoinTree = newWork.getMapWork().getJoinTree(); @@ -250,7 +251,7 @@ public static String genMapJoinOpAndLocalWork(HiveConf conf, MapredWork newWork, public static String genLocalWorkForMapJoin(MapredWork newWork, MapJoinOperator newMapJoinOp, int mapJoinPos) - throws SemanticException { + throws HiveException { try { // generate the local work and return the big table alias String bigTableAlias = MapJoinProcessor @@ -269,7 +270,7 @@ public static String genLocalWorkForMapJoin(MapredWork newWork, MapJoinOperator } private static void checkParentOperatorType(Operator op) - throws SemanticException { + throws HiveException { if (!op.opAllowedBeforeMapJoin()) { throw new SemanticException(ErrorMsg.OPERATOR_NOT_ALLOWED_WITH_MAPJOIN.getMsg()); } @@ -281,7 +282,7 @@ private static void checkParentOperatorType(Operator op) } private static void checkChildOperatorType(Operator op) - throws SemanticException { + throws HiveException { if (!op.opAllowedAfterMapJoin()) { throw new SemanticException(ErrorMsg.OPERATOR_NOT_ALLOWED_WITH_MAPJOIN.getMsg()); } @@ -293,7 +294,7 @@ private static void checkChildOperatorType(Operator op) } private static void validateMapJoinTypes(Operator op) - throws SemanticException { + throws HiveException { for (Operator parentOp : op.getParentOperators()) { checkParentOperatorType(parentOp); } @@ -320,7 +321,7 @@ public static MapJoinOperator convertMapJoin(HiveConf conf, LinkedHashMap, OpParseContext> opParseCtxMap, JoinOperator op, QBJoinTree joinTree, int mapJoinPos, boolean noCheckOuterJoin, boolean validateMapJoinTree) - throws SemanticException { + throws HiveException { // outer join cannot be performed on a table which is being cached JoinDesc desc = op.getConf(); @@ -402,7 +403,7 @@ public static MapJoinOperator convertMapJoin(HiveConf conf, public static MapJoinOperator convertJoinOpMapJoinOp(HiveConf hconf, LinkedHashMap, OpParseContext> opParseCtxMap, JoinOperator op, QBJoinTree joinTree, int mapJoinPos, boolean noCheckOuterJoin) - throws SemanticException { + throws HiveException { JoinDesc desc = op.getConf(); JoinCondDesc[] condns = desc.getConds(); @@ -588,7 +589,7 @@ public static MapJoinOperator convertJoinOpMapJoinOp(HiveConf hconf, public static MapJoinOperator convertSMBJoinToMapJoin(HiveConf hconf, Map, OpParseContext> opParseCtxMap, SMBMapJoinOperator smbJoinOp, QBJoinTree joinTree, int bigTablePos, boolean noCheckOuterJoin) - throws SemanticException { + throws HiveException { // Create a new map join operator SMBJoinDesc smbJoinDesc = smbJoinOp.getConf(); List keyCols = 
smbJoinDesc.getKeys().get(Byte.valueOf((byte) 0)); @@ -634,7 +635,7 @@ public static MapJoinOperator convertSMBJoinToMapJoin(HiveConf hconf, } public MapJoinOperator generateMapJoinOperator(ParseContext pctx, JoinOperator op, - QBJoinTree joinTree, int mapJoinPos) throws SemanticException { + QBJoinTree joinTree, int mapJoinPos) throws HiveException { HiveConf hiveConf = pctx.getConf(); boolean noCheckOuterJoin = HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTSORTMERGEBUCKETMAPJOIN) @@ -730,7 +731,7 @@ public MapJoinOperator generateMapJoinOperator(ParseContext pctx, JoinOperator o * @param mapJoinPos the position of big table as determined by either hints or auto conversion. * @param condns the join conditions * @return if given mapjoin position is a feasible big table position return same else -1. - * @throws SemanticException if given position is not in the big table candidates. + * @throws HiveException if given position is not in the big table candidates. */ public static int checkMapJoin(int mapJoinPos, JoinCondDesc[] condns) { Set bigTableCandidates = MapJoinProcessor.getBigTableCandidates(condns); @@ -742,7 +743,7 @@ public static int checkMapJoin(int mapJoinPos, JoinCondDesc[] condns) { return mapJoinPos; } - private void genSelectPlan(ParseContext pctx, MapJoinOperator input) throws SemanticException { + private void genSelectPlan(ParseContext pctx, MapJoinOperator input) throws HiveException { List> childOps = input.getChildOperators(); input.setChildOperators(null); @@ -794,7 +795,7 @@ private void genSelectPlan(ParseContext pctx, MapJoinOperator input) throws Sema * @return -1 if it cannot be converted to a map-side join, position of the map join node * otherwise */ - private int mapSideJoin(JoinOperator op, QBJoinTree joinTree) throws SemanticException { + private int mapSideJoin(JoinOperator op, QBJoinTree joinTree) throws HiveException { int mapJoinPos = -1; if (joinTree.isMapSideJoin()) { int pos = 0; @@ -835,7 +836,7 @@ private int mapSideJoin(JoinOperator op, QBJoinTree joinTree) throws SemanticExc * @param pactx * current parse context */ - public ParseContext transform(ParseContext pactx) throws SemanticException { + public ParseContext transform(ParseContext pactx) throws HiveException { pGraphContext = pactx; List listMapJoinOps = new ArrayList(); @@ -913,7 +914,7 @@ public ParseContext transform(ParseContext pactx) throws SemanticException { */ @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { MapJoinWalkerCtx ctx = (MapJoinWalkerCtx) procCtx; MapJoinOperator mapJoin = (MapJoinOperator) nd; @@ -1026,7 +1027,7 @@ private static void addRejectMapJoinToCtx(MapJoinWalkerCtx ctx, */ @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { MapJoinWalkerCtx ctx = (MapJoinWalkerCtx) procCtx; AbstractMapJoinOperator mapJoin = ctx.getCurrMapJoinOp(); @@ -1053,7 +1054,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, */ @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { MapJoinWalkerCtx ctx = (MapJoinWalkerCtx) procCtx; AbstractMapJoinOperator mapJoin = ctx.getCurrMapJoinOp(); addRejectMapJoinToCtx(ctx, mapJoin); @@ -1072,7 +1073,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, */ @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { return null; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java index 63862b9..13d8e8c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java @@ -38,8 +38,8 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils; @@ -53,7 +53,7 @@ private ParseContext pctx; @Override - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { this.pctx = pctx; String SEL = SelectOperator.getOperatorName(); String FIL = FilterOperator.getOperatorName(); @@ -73,7 +73,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException { private class SelectDedup implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { SelectOperator cSEL = (SelectOperator) nd; SelectOperator pSEL = (SelectOperator) stack.get(stack.size() - 2); if (pSEL.getNumChild() > 1) { @@ -178,7 +178,7 @@ private boolean checkReferences(ExprNodeDesc expr, Set funcOutputs, Set< private class FilterDedup implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { FilterOperator cFIL = (FilterOperator) nd; FilterOperator pFIL = (FilterOperator) stack.get(stack.size() - 2); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java index 6df4b3f..5bb6bb7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java @@ -22,6 +22,7 @@ import java.util.List; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.correlation.CorrelationOptimizer; import org.apache.hadoop.hive.ql.optimizer.correlation.ReduceSinkDeDuplication; import org.apache.hadoop.hive.ql.optimizer.index.RewriteGBUsingIndex; @@ -32,7 +33,6 @@ import org.apache.hadoop.hive.ql.optimizer.stats.annotation.AnnotateWithStatistics; import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcessor; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.ppd.PredicatePushDown; import org.apache.hadoop.hive.ql.ppd.PredicateTransitivePropagate; @@ -130,9 +130,9 @@ public void initialize(HiveConf hiveConf) { * Invoke all the transformations one-by-one, and alter the query plan. * * @return ParseContext - * @throws SemanticException + * @throws HiveException */ - public ParseContext optimize() throws SemanticException { + public ParseContext optimize() throws HiveException { for (Transform t : transformations) { pctx = t.transform(pctx); } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerExpressionOperatorFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerExpressionOperatorFactory.java index b639a2a..9ae05ef 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerExpressionOperatorFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerExpressionOperatorFactory.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; @@ -49,7 +49,7 @@ @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { ExprNodeDesc newfd = null; ExprNodeGenericFuncDesc fd = (ExprNodeGenericFuncDesc) nd; @@ -113,7 +113,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { ExprNodeFieldDesc fnd = (ExprNodeFieldDesc) nd; boolean unknown = false; @@ -150,7 +150,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { ExprNodeDesc newcd = null; ExprNodeColumnDesc cd = (ExprNodeColumnDesc) nd; @@ -179,7 +179,7 @@ protected abstract ExprNodeDesc processColumnDesc(NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { if (nd instanceof ExprNodeConstantDesc) { return ((ExprNodeConstantDesc) nd).clone(); } else if (nd instanceof ExprNodeNullDesc) { diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java index 51464e5..529cbc8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java @@ -27,8 +27,8 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; @@ -52,7 +52,7 @@ @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { FilterOperator fop = (FilterOperator) nd; FilterOperator fop2 = null; @@ -97,11 +97,11 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, * @param procCtx * @param fop * @param top - * @throws SemanticException + * @throws HiveException * @throws UDFArgumentException */ protected abstract void generatePredicate(NodeProcessorCtx procCtx, FilterOperator fop, - TableScanOperator top) throws SemanticException, UDFArgumentException; + TableScanOperator top) throws HiveException, UDFArgumentException; /** * Add pruning predicate. * @@ -176,7 +176,7 @@ protected void addPruningPred(Map> @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { // Nothing needs to be done. 
return null; } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerUtils.java index 108177e..4f39c9c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerUtils.java @@ -36,8 +36,8 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc; @@ -65,10 +65,10 @@ private PrunerUtils() { * @param opWalkerCtx * @param filterProc * @param defaultProc - * @throws SemanticException + * @throws HiveException */ public static void walkOperatorTree(ParseContext pctx, NodeProcessorCtx opWalkerCtx, - NodeProcessor filterProc, NodeProcessor defaultProc) throws SemanticException { + NodeProcessor filterProc, NodeProcessor defaultProc) throws HiveException { Map opRules = new LinkedHashMap(); // Build regular expression for operator rule. @@ -101,12 +101,12 @@ public static void walkOperatorTree(ParseContext pctx, NodeProcessorCtx opWalker * @param genFuncProc * @param defProc * @return - * @throws SemanticException + * @throws HiveException */ public static Map walkExprTree(ExprNodeDesc pred, NodeProcessorCtx ctx, NodeProcessor colProc, NodeProcessor fieldProc, NodeProcessor genFuncProc, NodeProcessor defProc) - throws SemanticException { + throws HiveException { // create a walker which walks the tree in a DFS manner while maintaining // the operator stack. The dispatcher // generates the plan from the operator tree diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java index 1a36dab..15fdd7a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java @@ -17,6 +17,7 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.GenTezProcContext; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.BaseWork; @@ -43,7 +44,7 @@ */ @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procContext, Object... 
nodeOutputs) - throws SemanticException { + throws HiveException { GenTezProcContext context = (GenTezProcContext) procContext; context.preceedingWork = null; context.currentRootOperator = null; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java index b0f4b47..cfe1094 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java @@ -44,9 +44,9 @@ import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.FilterDesc; import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc; @@ -97,7 +97,7 @@ public void setOpToSamplePruner( * .hive.ql.parse.ParseContext) */ @Override - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { // create a the context for walking operators SamplePrunerCtx samplePrunerCtx = new SamplePrunerCtx(pctx @@ -132,7 +132,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { FilterOperator filOp = (FilterOperator) nd; FilterDesc filOpDesc = filOp.getConf(); sampleDesc sampleDescr = filOpDesc.getSampleDescr(); @@ -162,7 +162,7 @@ public static NodeProcessor getFilterProc() { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { // Nothing needs to be done. 
return null; } @@ -179,11 +179,11 @@ public static NodeProcessor getDefaultProc() { * @param part * The partition to prune * @return Path[] - * @throws SemanticException + * @throws HiveException */ @SuppressWarnings("nls") public static Path[] prune(Partition part, sampleDesc sampleDescr) - throws SemanticException { + throws HiveException { int num = sampleDescr.getNumerator(); int den = sampleDescr.getDenominator(); int bucketCount = part.getBucketCount(); @@ -312,11 +312,11 @@ public static AddPathReturnStatus addPath(FileSystem fs, String pathPattern, lon * @param fileLimit * @param retPathList list of Paths returned * @return the result of the attempt - * @throws SemanticException + * @throws HiveException */ public static LimitPruneRetStatus limitPrune(Partition part, long sizeLimit, int fileLimit, Collection retPathList) - throws SemanticException { + throws HiveException { try { FileSystem fs = part.getDataLocation().getFileSystem(Hive.get().getConf()); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java index b522963..840cb8b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java @@ -29,8 +29,8 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.OptimizeTezProcContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; @@ -46,7 +46,7 @@ @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procContext, Object... nodeOutputs) - throws SemanticException { + throws HiveException { OptimizeTezProcContext context = (OptimizeTezProcContext) procContext; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchAggregation.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchAggregation.java index 084f9f8..3d45334 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchAggregation.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchAggregation.java @@ -40,8 +40,8 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.AggregationDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils; @@ -54,7 +54,7 @@ public class SimpleFetchAggregation implements Transform { @Override - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { if (pctx.getFetchTask() != null || !pctx.getQB().getIsQuery() || pctx.getQB().isAnalyzeRewrite() || pctx.getQB().isCTAS() || pctx.getLoadFileWork().size() > 1 || !pctx.getLoadTableWork().isEmpty()) { @@ -86,7 +86,7 @@ public SingleGBYProcessor(ParseContext pctx) { } public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { FileSinkOperator FS = (FileSinkOperator) nd; GroupByOperator cGBY = (GroupByOperator) stack.get(stack.size() - 3); ReduceSinkOperator RS = (ReduceSinkOperator) stack.get(stack.size() - 4); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java index 5e8dc41..60ad619 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java @@ -51,7 +51,6 @@ import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; import org.apache.hadoop.hive.ql.parse.QB; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.SplitSample; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.FetchWork; @@ -71,7 +70,7 @@ private final Log LOG = LogFactory.getLog(SimpleFetchOptimizer.class.getName()); - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { Map> topOps = pctx.getTopOps(); if (pctx.getQB().isSimpleSelectQuery() && topOps.size() == 1) { // no join, no groupby, no distinct, no lateral view, no subq, @@ -88,10 +87,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException { // Has to use full name to make sure it does not conflict with // org.apache.commons.lang.StringUtils LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); - if (e instanceof SemanticException) { - throw (SemanticException) e; - } - throw new SemanticException(e.getMessage(), e); + throw HiveException.wrap(e); } } } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SkewJoinOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SkewJoinOptimizer.java index 54afbfe..20c9f6e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SkewJoinOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SkewJoinOptimizer.java @@ -47,10 +47,10 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.RowResolver; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; @@ -87,7 +87,7 @@ public SkewJoinProc() { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { // We should be having a tree which looks like this // TS -> * -> RS - // \ @@ -615,7 +615,7 @@ private void insertRowResolvers( * (org.apache.hadoop.hive.ql.parse.ParseContext) */ @Override - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { Map opRules = new LinkedHashMap(); opRules.put(new RuleRegExp("R1", "TS%.*RS%JOIN%"), getSkewJoinProc()); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapJoinOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapJoinOptimizer.java index 51f1b74..9eeb7e6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapJoinOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapJoinOptimizer.java @@ -39,8 +39,8 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.OperatorDesc; //try to replace a bucket map join with a sorted merge map join @@ -54,7 +54,7 @@ public SortedMergeBucketMapJoinOptimizer() { private void getListOfRejectedJoins( ParseContext pctx, SortBucketJoinProcCtx smbJoinContext) - throws SemanticException { + throws HiveException { // Go through all joins - it should only contain selects and filters between // tablescan and join operators. @@ -74,7 +74,7 @@ private void getListOfRejectedJoins( } @Override - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { HiveConf conf = pctx.getConf(); SortBucketJoinProcCtx smbJoinContext = new SortBucketJoinProcCtx(conf); @@ -124,7 +124,7 @@ private NodeProcessor getDefaultProc() { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... nodeOutputs) - throws SemanticException { + throws HiveException { return null; } }; @@ -136,7 +136,7 @@ private NodeProcessor getCheckCandidateJoin() { return new NodeProcessor() { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { SortBucketJoinProcCtx smbJoinContext = (SortBucketJoinProcCtx)procCtx; JoinOperator joinOperator = (JoinOperator)nd; int size = stack.size(); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapjoinProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapjoinProc.java index 11ce47e..6fd574c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapjoinProc.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapjoinProc.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -40,7 +41,7 @@ public SortedMergeBucketMapjoinProc() { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { if (nd instanceof SMBMapJoinOperator) { return null; } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeJoinProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeJoinProc.java index 3a3859d..5a41f00 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeJoinProc.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeJoinProc.java @@ -24,8 +24,8 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; public class SortedMergeJoinProc extends AbstractSMBJoinProc implements NodeProcessor { @@ -38,7 +38,7 @@ public SortedMergeJoinProc() { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { JoinOperator joinOp = (JoinOperator) nd; SortBucketJoinProcCtx smbJoinContext = (SortBucketJoinProcCtx) procCtx; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java index 75390e7..f9d55c5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java @@ -30,10 +30,10 @@ import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.AggregationDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; @@ -62,7 +62,7 @@ private static final Log Log = LogFactory.getLog(StatsOptimizer.class); @Override - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { if (pctx.getFetchTask() != null || !pctx.getQB().getIsQuery() || pctx.getQB().isAnalyzeRewrite() || pctx.getQB().isCTAS() || @@ -142,7 +142,7 @@ private Long getNullcountFor(StatType type, ColumnStatisticsData statData) { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { // 1. Do few checks to determine eligibility of optimization // 2. look at ExprNodeFuncGenericDesc in select list to see if its min, max, count etc. 
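Note on the pattern repeated in the hunks above and below: in Hive's exception hierarchy, org.apache.hadoop.hive.ql.parse.SemanticException is a subclass of org.apache.hadoop.hive.ql.metadata.HiveException, so widening a method's clause from "throws SemanticException" to "throws HiveException" is source-compatible for bodies that still construct SemanticException directly; only callers that caught the narrow type (or wrapped-and-rethrew it, as in the removed catch blocks) need to change. The following is a minimal, self-contained sketch of that widening, not Hive code: BaseException, NarrowException, ParseContext, Transform and SampleTransform are hypothetical stand-ins chosen so the snippet compiles without Hive on the classpath.

public class ThrowsWideningSketch {

  static class BaseException extends Exception {        // stands in for HiveException
    BaseException(String msg) { super(msg); }
  }

  static class NarrowException extends BaseException {  // stands in for SemanticException
    NarrowException(String msg) { super(msg); }
  }

  static class ParseContext { }                          // reduced stand-in for ql.parse.ParseContext

  // After the change, the interface declares the broader checked exception.
  interface Transform {
    ParseContext transform(ParseContext pctx) throws BaseException;
  }

  // A transformation keeps throwing the narrower subtype with no change to its body:
  // `throw new NarrowException(...)` still satisfies `throws BaseException`.
  static class SampleTransform implements Transform {
    @Override
    public ParseContext transform(ParseContext pctx) throws BaseException {
      if (pctx == null) {
        throw new NarrowException("Invalid parse context"); // subtype is fine under the wider clause
      }
      return pctx;
    }
  }

  public static void main(String[] args) {
    try {
      new SampleTransform().transform(null);
    } catch (BaseException e) {
      // Callers that previously caught the narrow type must now catch the broader one,
      // or list the narrow type first and the broad type second.
      System.out.println("caught: " + e.getMessage());
    }
  }
}

The same reasoning explains why removed wrappers such as "catch (HiveException e) { throw new SemanticException(e.getMessage()); }" become unnecessary once the enclosing method itself declares HiveException.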
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/TableSizeBasedBigTableSelectorForAutoSMJ.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/TableSizeBasedBigTableSelectorForAutoSMJ.java index 3ca11d9..7d8a646 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/TableSizeBasedBigTableSelectorForAutoSMJ.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/TableSizeBasedBigTableSelectorForAutoSMJ.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; -import org.apache.hadoop.hive.ql.parse.SemanticException; /* * This is a pluggable policy to chose the candidate map-join table for converting a join to a @@ -41,47 +40,43 @@ implements BigTableSelectorForAutoSMJ { public int getBigTablePosition(ParseContext parseCtx, JoinOperator joinOp, Set bigTableCandidates) - throws SemanticException { + throws HiveException { int bigTablePos = -1; long maxSize = -1; HiveConf conf = parseCtx.getConf(); - try { - List topOps = new ArrayList(); - getListTopOps(joinOp, topOps); - int currentPos = 0; - for (TableScanOperator topOp : topOps) { + List topOps = new ArrayList(); + getListTopOps(joinOp, topOps); + int currentPos = 0; + for (TableScanOperator topOp : topOps) { - if (topOp == null) { - return -1; - } + if (topOp == null) { + return -1; + } - if (!bigTableCandidates.contains(currentPos)) { - currentPos++; - continue; - } - Table table = parseCtx.getTopToTable().get(topOp); - long currentSize = 0; + if (!bigTableCandidates.contains(currentPos)) { + currentPos++; + continue; + } + Table table = parseCtx.getTopToTable().get(topOp); + long currentSize = 0; - if (!table.isPartitioned()) { - currentSize = getSize(conf, table); - } - else { - // For partitioned tables, get the size of all the partitions - PrunedPartitionList partsList = PartitionPruner.prune(topOp, parseCtx, null); - for (Partition part : partsList.getNotDeniedPartns()) { - currentSize += getSize(conf, part); - } + if (!table.isPartitioned()) { + currentSize = getSize(conf, table); + } + else { + // For partitioned tables, get the size of all the partitions + PrunedPartitionList partsList = PartitionPruner.prune(topOp, parseCtx, null); + for (Partition part : partsList.getNotDeniedPartns()) { + currentSize += getSize(conf, part); } + } - if (currentSize > maxSize) { - maxSize = currentSize; - bigTablePos = currentPos; - } - currentPos++; + if (currentSize > maxSize) { + maxSize = currentSize; + bigTablePos = currentPos; } - } catch (HiveException e) { - throw new SemanticException(e.getMessage()); + currentPos++; } return bigTablePos; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/Transform.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/Transform.java index 9f957da..f2c33b0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/Transform.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/Transform.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hive.ql.optimizer; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; /** * Optimizer interface. 
All the rule-based optimizations implement this @@ -34,7 +34,7 @@ * @param pctx * input parse context * @return ParseContext - * @throws SemanticException + * @throws HiveException */ - ParseContext transform(ParseContext pctx) throws SemanticException; + ParseContext transform(ParseContext pctx) throws HiveException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java index 3595640..8502801 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java @@ -55,6 +55,7 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.optimizer.MapJoinProcessor; import org.apache.hadoop.hive.ql.optimizer.Transform; @@ -99,7 +100,7 @@ public CorrelationOptimizer() { abort = false; } - private void findPossibleAutoConvertedJoinOperators() throws SemanticException { + private void findPossibleAutoConvertedJoinOperators() throws HiveException { // Guess if CommonJoinResolver will work. If CommonJoinResolver may // convert a join operation, correlation optimizer will not merge that join. // TODO: If hive.auto.convert.join.noconditionaltask=true, for a JoinOperator @@ -200,9 +201,9 @@ private void findPossibleAutoConvertedJoinOperators() throws SemanticException { * * @param pactx * current parse context - * @throws SemanticException + * @throws HiveException */ - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { pCtx = pctx; @@ -326,14 +327,14 @@ private boolean sameOrder(String order1, String order2) { * @param current The current operator we are visiting * @param correlation The object keeps tracking the correlation * @return - * @throws SemanticException + * @throws HiveException */ private LinkedHashSet findCorrelatedReduceSinkOperators( Operator child, List childKeyCols, List childPartitionCols, String childRSOrder, Operator current, - IntraQueryCorrelation correlation) throws SemanticException { + IntraQueryCorrelation correlation) throws HiveException { LOG.info("now detecting operator " + current.getIdentifier() + " " + current.getName()); @@ -506,11 +507,11 @@ private boolean sameOrder(String order1, String order2) { * @param correlationCtx * @param correlation * @return - * @throws SemanticException + * @throws HiveException */ private LinkedHashSet exploitJobFlowCorrelation(ReduceSinkOperator op, CorrelationNodeProcCtx correlationCtx, IntraQueryCorrelation correlation) - throws SemanticException { + throws HiveException { correlationCtx.addWalked(op); correlation.addToAllReduceSinkOperators(op); boolean shouldDetect = true; @@ -568,7 +569,7 @@ private boolean sameOrder(String order1, String order2) { } public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { CorrelationNodeProcCtx corrCtx = (CorrelationNodeProcCtx) ctx; ReduceSinkOperator op = (ReduceSinkOperator) nd; @@ -631,7 +632,7 @@ private NodeProcessor getDefaultProc() { return new NodeProcessor() { @Override public Object process(Node nd, Stack stack, - NodeProcessorCtx ctx, Object... nodeOutputs) throws SemanticException { + NodeProcessorCtx ctx, Object... nodeOutputs) throws HiveException { Operator op = (Operator) nd; LOG.info("Walk to operator " + op.getIdentifier() + " " + op.getName() + ". No actual work to do"); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java index 98fcff5..5543b9c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java @@ -82,7 +82,7 @@ protected static String getColumnName( return null; } - protected static boolean hasGroupingSet(ReduceSinkOperator cRS) throws SemanticException { + protected static boolean hasGroupingSet(ReduceSinkOperator cRS) throws HiveException { GroupByOperator cGBYm = getSingleParent(cRS, GroupByOperator.class); if (cGBYm != null && cGBYm.getConf().isGroupingSetsPresent()) { return true; @@ -98,7 +98,7 @@ protected static boolean hasGroupingSet(ReduceSinkOperator cRS) throws SemanticE * @throws HiveException */ protected static Operator getSingleParent(Operator operator, - boolean throwException) throws SemanticException { + boolean throwException) throws HiveException { List> parents = operator.getParentOperators(); if (parents != null && parents.size() == 1) { return parents.get(0); @@ -116,7 +116,7 @@ protected static boolean hasGroupingSet(ReduceSinkOperator cRS) throws SemanticE return null; } - protected static Operator getSingleParent(Operator operator) throws SemanticException { + protected static Operator getSingleParent(Operator operator) throws HiveException { return getSingleParent(operator, false); } @@ -128,7 +128,7 @@ protected static boolean hasGroupingSet(ReduceSinkOperator cRS) throws SemanticE * @throws HiveException */ protected static Operator getSingleChild(Operator operator, - boolean throwException) throws SemanticException { + boolean throwException) throws HiveException { List> children = operator.getChildOperators(); if (children != null && children.size() == 1) { return children.get(0); @@ -146,24 +146,24 @@ protected static boolean hasGroupingSet(ReduceSinkOperator cRS) throws SemanticE return null; } - protected static Operator getSingleChild(Operator operator) throws SemanticException { + protected static Operator getSingleChild(Operator operator) throws HiveException { return getSingleChild(operator, false); } protected static T getSingleChild(Operator operator, Class type) - throws SemanticException { + throws HiveException { Operator parent = getSingleChild(operator); return type.isInstance(parent) ? (T)parent : null; } protected static T getSingleParent(Operator operator, Class type) - throws SemanticException { + throws HiveException { Operator parent = getSingleParent(operator); return type.isInstance(parent) ? (T)parent : null; } protected static Operator getStartForGroupBy(ReduceSinkOperator cRS) - throws SemanticException { + throws HiveException { Operator parent = getSingleParent(cRS); return parent instanceof GroupByOperator ? 
parent : cRS; // skip map-aggr GBY } @@ -202,7 +202,7 @@ protected static boolean isSortedTag(JoinOperator joinOp, int tag) { } protected static int indexOf(ExprNodeDesc cexpr, ExprNodeDesc[] pexprs, Operator child, - Operator[] parents, boolean[] sorted) throws SemanticException { + Operator[] parents, boolean[] sorted) throws HiveException { for (int tag = 0; tag < parents.length; tag++) { if (sorted[tag] && pexprs[tag].isSame(ExprNodeDescUtils.backtrack(cexpr, child, parents[tag]))) { @@ -213,7 +213,7 @@ protected static int indexOf(ExprNodeDesc cexpr, ExprNodeDesc[] pexprs, Operator } protected static > T findPossibleParent(Operator start, Class target, - boolean trustScript) throws SemanticException { + boolean trustScript) throws HiveException { T[] parents = findPossibleParents(start, target, trustScript); return parents != null && parents.length == 1 ? parents[0] : null; } @@ -221,7 +221,7 @@ protected static int indexOf(ExprNodeDesc cexpr, ExprNodeDesc[] pexprs, Operator @SuppressWarnings("unchecked") protected static > T[] findPossibleParents( Operator start, Class target, - boolean trustScript) throws SemanticException { + boolean trustScript) throws HiveException { Operator cursor = getSingleParent(start); for (; cursor != null; cursor = getSingleParent(cursor)) { if (target.isAssignableFrom(cursor.getClass())) { @@ -249,7 +249,7 @@ protected static int indexOf(ExprNodeDesc cexpr, ExprNodeDesc[] pexprs, Operator @SuppressWarnings("unchecked") protected static > T[] findParents(JoinOperator join, Class target) - throws SemanticException { + throws HiveException { List> parents = join.getParentOperators(); T[] result = (T[]) Array.newInstance(target, parents.size()); for (int tag = 0; tag < result.length; tag++) { @@ -298,10 +298,10 @@ protected static TableScanOperator findTableScanOperator( /** * Find all sibling ReduceSinkOperators (which have the same child operator of op) of op (op * included). - * @throws SemanticException + * @throws HiveException */ public static List findSiblingReduceSinkOperators(ReduceSinkOperator op) - throws SemanticException { + throws HiveException { List siblingRSs = new ArrayList(); Operator child = getSingleChild(op, true); for (Operator parent: child.getParentOperators()) { @@ -318,17 +318,17 @@ protected static TableScanOperator findTableScanOperator( /** * Find all sibling operators (which have the same child operator of op) of op (op * included). 
- * @throws SemanticException + * @throws HiveException */ public static List> findSiblingOperators( Operator op) - throws SemanticException { + throws HiveException { Operator child = getSingleChild(op, true); return child.getParentOperators(); } protected static SelectOperator replaceReduceSinkWithSelectOperator(ReduceSinkOperator childRS, - ParseContext context, AbstractCorrelationProcCtx procCtx) throws SemanticException { + ParseContext context, AbstractCorrelationProcCtx procCtx) throws HiveException { SelectOperator select = replaceOperatorWithSelect(childRS, context, procCtx); select.getConf().setOutputColumnNames(childRS.getConf().getOutputValueColumnNames()); select.getConf().setColList(childRS.getConf().getValueCols()); @@ -339,7 +339,7 @@ protected static SelectOperator replaceReduceSinkWithSelectOperator(ReduceSinkOp // If child if cRS is EXT, EXT also should be removed protected static SelectOperator replaceOperatorWithSelect(Operator operator, ParseContext context, AbstractCorrelationProcCtx procCtx) - throws SemanticException { + throws HiveException { RowResolver inputRR = context.getOpParseCtx().get(operator).getRowResolver(); SelectDesc select = new SelectDesc(null, null); @@ -369,7 +369,7 @@ protected static SelectOperator replaceOperatorWithSelect(Operator operator, } protected static void removeReduceSinkForGroupBy(ReduceSinkOperator cRS, GroupByOperator cGBYr, - ParseContext context, AbstractCorrelationProcCtx procCtx) throws SemanticException { + ParseContext context, AbstractCorrelationProcCtx procCtx) throws HiveException { Operator parent = getSingleParent(cRS); @@ -432,7 +432,7 @@ protected static void removeReduceSinkForGroupBy(ReduceSinkOperator cRS, GroupBy * @param operator * @throws HiveException */ - protected static void isNullOperator(Operator operator) throws SemanticException { + protected static void isNullOperator(Operator operator) throws HiveException { if (operator == null) { throw new SemanticException("Operator " + operator.getName() + " (ID: " + operator.getIdentifier() + ") is null."); @@ -448,7 +448,7 @@ protected static void isNullOperator(Operator operator) throws SemanticExcept */ protected static void insertOperatorBetween( Operator newOperator, Operator parent, Operator child) - throws SemanticException { + throws HiveException { isNullOperator(newOperator); isNullOperator(parent); isNullOperator(child); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/QueryPlanTreeTransformation.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/QueryPlanTreeTransformation.java index aa02a40..cbb9ea5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/QueryPlanTreeTransformation.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/QueryPlanTreeTransformation.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hive.ql.exec.OperatorFactory; import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.correlation.CorrelationOptimizer.CorrelationNodeProcCtx; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -52,7 +53,7 @@ private static void setNewTag(IntraQueryCorrelation correlation, List> childrenOfDemux, ReduceSinkOperator rsop, Map bottomRSToNewTag) - throws SemanticException { + throws HiveException { int newTag = bottomRSToNewTag.get(rsop); int oldTag = rsop.getConf().getTag(); 
if (oldTag == -1) { @@ -100,13 +101,13 @@ private static void setNewTag(IntraQueryCorrelation correlation, * @param pCtx * @param corrCtx * @param correlation - * @throws SemanticException + * @throws HiveException */ protected static void applyCorrelation( ParseContext pCtx, CorrelationNodeProcCtx corrCtx, IntraQueryCorrelation correlation) - throws SemanticException { + throws HiveException { final List bottomReduceSinkOperators = correlation.getBottomReduceSinkOperators(); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java index b206448..2d0a591 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.Transform; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -65,7 +66,7 @@ protected ParseContext pGraphContext; @Override - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { pGraphContext = pctx; // generate pruned column list for all relevant operators @@ -133,7 +134,7 @@ public static NodeProcessor getDefaultProc() { static class DefaultProc implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { return null; } } @@ -141,7 +142,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, public abstract static class AbsctractReducerReducerProc implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { ReduceSinkDeduplicateProcCtx dedupCtx = (ReduceSinkDeduplicateProcCtx) procCtx; if (dedupCtx.hasBeenRemoved((Operator) nd)) { return false; @@ -165,15 +166,15 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } protected abstract Object process(ReduceSinkOperator cRS, ReduceSinkDeduplicateProcCtx dedupCtx) - throws SemanticException; + throws HiveException; protected abstract Object process(ReduceSinkOperator cRS, GroupByOperator cGBY, - ReduceSinkDeduplicateProcCtx dedupCtx) throws SemanticException; + ReduceSinkDeduplicateProcCtx dedupCtx) throws HiveException; // for JOIN-RS case, it's not possible generally to merge if child has // more key/partition columns than parents protected boolean merge(ReduceSinkOperator cRS, JoinOperator pJoin, int minReducer) - throws SemanticException { + throws HiveException { List> parents = pJoin.getParentOperators(); ReduceSinkOperator[] pRSs = parents.toArray(new ReduceSinkOperator[parents.size()]); ReduceSinkDesc cRSc = cRS.getConf(); @@ -240,7 +241,7 @@ protected boolean merge(ReduceSinkOperator cRS, JoinOperator pJoin, int minReduc * partitioning columns (if exist) of child RS. 
*/ protected boolean merge(ReduceSinkOperator cRS, ReduceSinkOperator pRS, int minReducer) - throws SemanticException { + throws HiveException { int[] result = checkStatus(cRS, pRS, minReducer); if (result == null) { return false; @@ -309,7 +310,7 @@ protected boolean merge(ReduceSinkOperator cRS, ReduceSinkOperator pRS, int minR * 3. for 1, configuration of child RS is more specific than parent RS */ private int[] checkStatus(ReduceSinkOperator cRS, ReduceSinkOperator pRS, int minReducer) - throws SemanticException { + throws HiveException { ReduceSinkDesc cConf = cRS.getConf(); ReduceSinkDesc pConf = pRS.getConf(); Integer moveRSOrderTo = checkOrder(cConf.getOrder(), pConf.getOrder()); @@ -342,7 +343,7 @@ protected boolean merge(ReduceSinkOperator cRS, ReduceSinkOperator pRS, int minR * should be backtrackable to parent. */ private Integer checkExprs(List ckeys, List pkeys, - ReduceSinkOperator cRS, ReduceSinkOperator pRS) throws SemanticException { + ReduceSinkOperator cRS, ReduceSinkOperator pRS) throws HiveException { Integer moveKeyColTo = 0; if (ckeys == null || ckeys.isEmpty()) { if (pkeys != null && !pkeys.isEmpty()) { @@ -366,7 +367,7 @@ private Integer checkExprs(List ckeys, List pkeys, // backtrack key exprs of child to parent and compare it with parent's protected Integer sameKeys(List cexprs, List pexprs, - Operator child, Operator parent) throws SemanticException { + Operator child, Operator parent) throws HiveException { int common = Math.min(cexprs.size(), pexprs.size()); int limit = Math.max(cexprs.size(), pexprs.size()); int i = 0; @@ -435,7 +436,7 @@ protected Integer checkNumReducer(int creduce, int preduce) { // pRS-pGBY-cRS @Override public Object process(ReduceSinkOperator cRS, ReduceSinkDeduplicateProcCtx dedupCtx) - throws SemanticException { + throws HiveException { GroupByOperator pGBY = CorrelationUtilities.findPossibleParent( cRS, GroupByOperator.class, dedupCtx.trustScript()); @@ -457,7 +458,7 @@ public Object process(ReduceSinkOperator cRS, ReduceSinkDeduplicateProcCtx dedup @Override public Object process(ReduceSinkOperator cRS, GroupByOperator cGBY, ReduceSinkDeduplicateProcCtx dedupCtx) - throws SemanticException { + throws HiveException { Operator start = CorrelationUtilities.getStartForGroupBy(cRS); GroupByOperator pGBY = CorrelationUtilities.findPossibleParent( @@ -481,7 +482,7 @@ public Object process(ReduceSinkOperator cRS, GroupByOperator cGBY, // pRS-pJOIN-cRS @Override public Object process(ReduceSinkOperator cRS, ReduceSinkDeduplicateProcCtx dedupCtx) - throws SemanticException { + throws HiveException { JoinOperator pJoin = CorrelationUtilities.findPossibleParent(cRS, JoinOperator.class, dedupCtx.trustScript()); if (pJoin != null && merge(cRS, pJoin, dedupCtx.minReducer())) { @@ -497,7 +498,7 @@ public Object process(ReduceSinkOperator cRS, ReduceSinkDeduplicateProcCtx dedup @Override public Object process(ReduceSinkOperator cRS, GroupByOperator cGBY, ReduceSinkDeduplicateProcCtx dedupCtx) - throws SemanticException { + throws HiveException { Operator start = CorrelationUtilities.getStartForGroupBy(cRS); JoinOperator pJoin = CorrelationUtilities.findPossibleParent( @@ -517,7 +518,7 @@ public Object process(ReduceSinkOperator cRS, GroupByOperator cGBY, // pRS-cRS @Override public Object process(ReduceSinkOperator cRS, ReduceSinkDeduplicateProcCtx dedupCtx) - throws SemanticException { + throws HiveException { ReduceSinkOperator pRS = CorrelationUtilities.findPossibleParent( cRS, ReduceSinkOperator.class, dedupCtx.trustScript()); @@ -533,7 +534,7 
@@ public Object process(ReduceSinkOperator cRS, ReduceSinkDeduplicateProcCtx dedup @Override public Object process(ReduceSinkOperator cRS, GroupByOperator cGBY, ReduceSinkDeduplicateProcCtx dedupCtx) - throws SemanticException { + throws HiveException { Operator start = CorrelationUtilities.getStartForGroupBy(cRS); ReduceSinkOperator pRS = CorrelationUtilities.findPossibleParent( diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyCtx.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyCtx.java index cc94254..fefd425 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyCtx.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyCtx.java @@ -43,8 +43,8 @@ import org.apache.hadoop.hive.ql.lib.PreOrderWalker; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.OperatorDesc; /** @@ -253,10 +253,10 @@ public ParseContext getParseContext() { * {@link RewriteVars} enum. * * @param topOp - * @throws SemanticException + * @throws HiveException */ void populateRewriteVars(Operator topOp) - throws SemanticException{ + throws HiveException { Map opRules = new LinkedHashMap(); opRules.put(new RuleRegExp("R1", FilterOperator.getOperatorName() + "%"), RewriteCanApplyProcFactory.canApplyOnFilterOperator()); @@ -276,10 +276,10 @@ void populateRewriteVars(Operator topOp) try { ogw.startWalking(topNodes, null); - } catch (SemanticException e) { + } catch (HiveException e) { LOG.error("Exception in walking operator tree. Rewrite variables not populated"); LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); - throw new SemanticException(e.getMessage(), e); + throw e; } } @@ -292,7 +292,7 @@ private NodeProcessor getDefaultProc() { return new NodeProcessor() { @Override public Object process(Node nd, Stack stack, - NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException { + NodeProcessorCtx procCtx, Object... nodeOutputs) throws HiveException { return null; } }; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyProcFactory.java index 4a2f52f..aba05e4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyProcFactory.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.AggregationDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; @@ -61,7 +61,7 @@ private RewriteCanApplyProcFactory(){ */ private static class CheckFilterProc implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { FilterOperator operator = (FilterOperator)nd; canApplyCtx = (RewriteCanApplyCtx)ctx; FilterDesc conf = (FilterDesc)operator.getConf(); @@ -96,7 +96,7 @@ public static CheckFilterProc canApplyOnFilterOperator() { private static class CheckGroupByProc implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { GroupByOperator operator = (GroupByOperator)nd; canApplyCtx = (RewriteCanApplyCtx)ctx; //for each group-by clause in query, only one GroupByOperator of the @@ -200,7 +200,7 @@ public static CheckGroupByProc canApplyOnGroupByOperator() { */ private static class CheckSelectProc implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { SelectOperator operator = (SelectOperator)nd; canApplyCtx = (RewriteCanApplyCtx)ctx; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java index 11a6d07..571d79e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java @@ -117,7 +117,7 @@ @Override - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { parseContext = pctx; hiveConf = parseContext.getConf(); try { @@ -153,9 +153,9 @@ private String getName() { * did not meet the rewrite criterion. * * @return - * @throws SemanticException + * @throws HiveException */ - boolean shouldApplyOptimization() throws SemanticException{ + boolean shouldApplyOptimization() throws HiveException{ boolean canApply = false; if(ifQueryHasMultipleTables()){ //We do not apply this optimization for this case as of now. @@ -217,10 +217,10 @@ boolean shouldApplyOptimization() throws SemanticException{ * @param topOp - TableScanOperator for a single the operator tree branch * @param indexes - Map of a table and list of indexes on it * @return - true if rewrite can be applied on the current branch; false otherwise - * @throws SemanticException + * @throws HiveException */ private boolean checkIfRewriteCanBeApplied(TableScanOperator topOp, Table baseTable, - Map> indexes) throws SemanticException{ + Map> indexes) throws HiveException{ boolean canApply = false; //Context for checking if this optimization can be applied to the input query RewriteCanApplyCtx canApplyCtx = RewriteCanApplyCtx.getInstance(parseContext); @@ -300,9 +300,9 @@ boolean ifQueryHasMultipleTables(){ /** * Get a list of indexes which can be used for rewrite. 
* @return - * @throws SemanticException + * @throws HiveException */ - private Map> getIndexesForRewrite() throws SemanticException{ + private Map> getIndexesForRewrite() throws HiveException{ List supportedIndexes = new ArrayList(); supportedIndexes.add(AggregateIndexHandler.class.getName()); @@ -326,10 +326,10 @@ boolean ifQueryHasMultipleTables(){ * @param tableScan * @param indexes * @return - * @throws SemanticException + * @throws HiveException */ private boolean checkIfIndexBuiltOnAllTablePartitions(TableScanOperator tableScan, - Map> indexes) throws SemanticException{ + Map> indexes) throws HiveException{ // check if we have indexes on all partitions in this table scan Set queryPartitions; try { @@ -339,7 +339,7 @@ private boolean checkIfIndexBuiltOnAllTablePartitions(TableScanOperator tableSca } } catch (HiveException e) { LOG.error("Fatal Error: problem accessing metastore", e); - throw new SemanticException(e); + throw e; } if(queryPartitions.size() != 0){ return true; @@ -352,9 +352,9 @@ private boolean checkIfIndexBuiltOnAllTablePartitions(TableScanOperator tableSca * for all the indexes that satisfy the rewrite criteria. * @param indexTables * @return - * @throws SemanticException + * @throws HiveException */ - Map> getIndexToKeysMap(List indexTables) throws SemanticException{ + Map> getIndexToKeysMap(List indexTables) throws HiveException{ Index index = null; Hive hiveInstance = hiveDb; Map> indexToKeysMap = new LinkedHashMap>(); @@ -398,11 +398,11 @@ private boolean checkIfIndexBuiltOnAllTablePartitions(TableScanOperator tableSca /** * Method to rewrite the input query if all optimization criteria is passed. * The method iterates over the tsOpToProcess {@link ArrayList} to apply the rewrites - * @throws SemanticException + * @throws HiveException * */ @SuppressWarnings("unchecked") - private void rewriteOriginalQuery() throws SemanticException { + private void rewriteOriginalQuery() throws HiveException { Map> topOpMap = (HashMap>) parseContext.getTopOps().clone(); Iterator tsOpItr = tsOpToProcess.keySet().iterator(); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteParseContextGenerator.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteParseContextGenerator.java index dee7d7e..5967054 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteParseContextGenerator.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteParseContextGenerator.java @@ -24,6 +24,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ASTNode; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.ParseContext; @@ -55,10 +56,10 @@ private RewriteParseContextGenerator(){ * @param conf * @param command * @return the parse context - * @throws SemanticException + * @throws HiveException */ public static ParseContext generateOperatorTree(HiveConf conf, - String command) throws SemanticException{ + String command) throws HiveException { Context ctx; ParseContext subPCtx = null; try { @@ -83,11 +84,11 @@ public static ParseContext generateOperatorTree(HiveConf conf, "tree for input command - " + command + " " , e); LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); throw new SemanticException(e.getMessage(), e); - } catch (SemanticException e) { - LOG.error("SemanticException in generating the 
operator " + + } catch (HiveException e) { + LOG.error("HiveException in generating the operator " + "tree for input command - " + command + " " , e); LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); - throw new SemanticException(e.getMessage(), e); + throw e; } return subPCtx; @@ -101,10 +102,10 @@ public static ParseContext generateOperatorTree(HiveConf conf, * @param sem * @param ast * @return - * @throws SemanticException + * @throws HiveException */ private static void doSemanticAnalysis(SemanticAnalyzer sem, - ASTNode ast, Context ctx) throws SemanticException { + ASTNode ast, Context ctx) throws HiveException { QB qb = new QB(null, null, false); ASTNode child = ast; ParseContext subPCtx = ((SemanticAnalyzer) sem).getParseContext(); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndex.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndex.java index 1d8336f..d6e7ba7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndex.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndex.java @@ -76,7 +76,7 @@ private RewriteQueryUsingAggregateIndex() { private static class NewQuerySelectSchemaProc implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { SelectOperator operator = (SelectOperator)nd; rewriteQueryCtx = (RewriteQueryUsingAggregateIndexCtx)ctx; List> childOps = operator.getChildOperators(); @@ -122,7 +122,7 @@ public static NewQuerySelectSchemaProc getNewQuerySelectSchemaProc(){ */ private static class ReplaceTableScanOpProc implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { TableScanOperator scanOperator = (TableScanOperator)nd; rewriteQueryCtx = (RewriteQueryUsingAggregateIndexCtx)ctx; String baseTableName = rewriteQueryCtx.getBaseTableName(); @@ -159,7 +159,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, } catch (HiveException e) { LOG.error("Error while getting the table handle for index table."); LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); - throw new SemanticException(e.getMessage(), e); + throw e; } String k = indexTableName + Path.SEPARATOR; @@ -226,7 +226,7 @@ public static ReplaceTableScanOpProc getReplaceTableScanProc(){ */ private static class NewQueryGroupbySchemaProc implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { GroupByOperator operator = (GroupByOperator)nd; rewriteQueryCtx = (RewriteQueryUsingAggregateIndexCtx)ctx; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java index b5873a4..e41546a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java @@ -38,9 +38,9 @@ import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.OpParseContext; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; @@ -119,10 +119,10 @@ public ExprNodeColumnDesc getAggrExprNode() { * to rewrite the original query using aggregate index. * * @param topOp - * @throws SemanticException + * @throws HiveException */ public void invokeRewriteQueryProc( - Operator topOp) throws SemanticException{ + Operator topOp) throws HiveException { Map opRules = new LinkedHashMap(); // replace scan operator containing original table with index table @@ -155,7 +155,7 @@ private NodeProcessor getDefaultProc() { return new NodeProcessor() { @Override public Object process(Node nd, Stack stack, - NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException { + NodeProcessorCtx procCtx, Object... nodeOutputs) throws HiveException { return null; } }; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/ExprProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/ExprProcFactory.java index b5cdde1..9c83ae0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/ExprProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/ExprProcFactory.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; @@ -63,7 +63,7 @@ @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { ExprNodeColumnDesc cd = (ExprNodeColumnDesc) nd; ExprProcCtx epc = (ExprProcCtx) procCtx; @@ -97,7 +97,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { assert (nd instanceof ExprNodeGenericFuncDesc || nd instanceof ExprNodeFieldDesc); @@ -134,7 +134,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { assert (nd instanceof ExprNodeConstantDesc || nd instanceof ExprNodeNullDesc); // Create a dependency that has no basecols @@ -170,11 +170,11 @@ public static NodeProcessor getColumnProcessor() { * The input operator to the current operator. * @param expr * The expression that is being processed. - * @throws SemanticException + * @throws HiveException */ public static Dependency getExprDependency(LineageCtx lctx, Operator inpOp, ExprNodeDesc expr) - throws SemanticException { + throws HiveException { // Create the walker, the rules dispatcher and the context. ExprProcCtx exprCtx = new ExprProcCtx(lctx, inpOp); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/Generator.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/Generator.java index 51bef04..d3edcd0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/Generator.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/Generator.java @@ -41,9 +41,9 @@ import org.apache.hadoop.hive.ql.lib.PreOrderWalker; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.Transform; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.session.SessionState; /** @@ -57,7 +57,7 @@ * @see org.apache.hadoop.hive.ql.optimizer.Transform#transform(org.apache.hadoop.hive.ql.parse.ParseContext) */ @Override - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { // Create the lineage context LineageCtx lCtx = new LineageCtx(pctx); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java index 78b7ca8..27ea6fd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java @@ -50,9 +50,9 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Utils; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.VirtualColumn; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.AggregationDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.JoinDesc; @@ -82,7 +82,7 @@ @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { // LineageCTx LineageCtx lCtx = (LineageCtx) procCtx; @@ -128,7 +128,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { // LineageCtx LineageCtx lCtx = (LineageCtx) procCtx; @@ -186,7 +186,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, public static class JoinLineage extends DefaultLineage implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... 
nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { // Assert that there is atleast one item in the stack. This should never // be called for leafs. @@ -228,7 +228,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, public static class LateralViewJoinLineage extends DefaultLineage implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { // Assert that there is atleast one item in the stack. This should never // be called for leafs. @@ -279,7 +279,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, public static class SelectLineage extends DefaultLineage implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { LineageCtx lctx = (LineageCtx)procCtx; SelectOperator sop = (SelectOperator)nd; @@ -312,7 +312,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, public static class GroupByLineage extends DefaultLineage implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { LineageCtx lctx = (LineageCtx)procCtx; GroupByOperator gop = (GroupByOperator)nd; @@ -394,7 +394,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @SuppressWarnings("unchecked") @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { // Assert that there is atleast one item in the stack. This should never // be called for leafs. assert(!stack.isEmpty()); @@ -429,7 +429,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @SuppressWarnings("unchecked") @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { // Assert that there is atleast one item in the stack. This should never // be called for leafs. assert(!stack.isEmpty()); @@ -476,7 +476,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @SuppressWarnings("unchecked") @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { // Assert that there is atleast one item in the stack. This should never // be called for leafs. 
assert(!stack.isEmpty()); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/LBExprProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/LBExprProcFactory.java index 2cda11b..a4a74b1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/LBExprProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/LBExprProcFactory.java @@ -22,10 +22,10 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.optimizer.PrunerExpressionOperatorFactory; import org.apache.hadoop.hive.ql.optimizer.PrunerUtils; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; @@ -85,10 +85,10 @@ private boolean isPruneForListBucketing(Partition part, String columnName) { * generated * @param part * The partition this walker is walking - * @throws SemanticException + * @throws HiveException */ public static ExprNodeDesc genPruner(String tabAlias, ExprNodeDesc pred, Partition part) - throws SemanticException { + throws HiveException { // Create the walker, the rules dispatcher and the context. NodeProcessorCtx lbprCtx = new LBExprProcCtx(tabAlias, part); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/LBPartitionProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/LBPartitionProcFactory.java index 41d27fa..b3ffe8f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/LBPartitionProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/LBPartitionProcFactory.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; -import org.apache.hadoop.hive.ql.parse.SemanticException; /** * Walk through top operators in tree to find all partitions. 
@@ -49,7 +48,7 @@ @Override protected void generatePredicate(NodeProcessorCtx procCtx, FilterOperator fop, - TableScanOperator top) throws SemanticException, UDFArgumentException { + TableScanOperator top) throws HiveException, UDFArgumentException { LBOpPartitionWalkerCtx owc = (LBOpPartitionWalkerCtx) procCtx; //Run partition pruner to get partitions @@ -61,7 +60,7 @@ protected void generatePredicate(NodeProcessorCtx procCtx, FilterOperator fop, } catch (HiveException e) { // Has to use full name to make sure it does not conflict with // org.apache.commons.lang.StringUtils - throw new SemanticException(e.getMessage(), e); + throw e; } if (prunedPartList != null) { diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/LBProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/LBProcFactory.java index 8857ced..2be4602 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/LBProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/LBProcFactory.java @@ -22,8 +22,8 @@ import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.PrunerOperatorFactory; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; /** @@ -42,7 +42,7 @@ @Override protected void generatePredicate(NodeProcessorCtx procCtx, FilterOperator fop, - TableScanOperator top) throws SemanticException, UDFArgumentException { + TableScanOperator top) throws HiveException, UDFArgumentException { LBOpWalkerCtx owc = (LBOpWalkerCtx) procCtx; // Otherwise this is not a sampling predicate and we need to ExprNodeDesc predicate = fop.getConf().getPredicate(); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java index 33b6e86..2c17999 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java @@ -28,12 +28,12 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.optimizer.PrunerUtils; import org.apache.hadoop.hive.ql.optimizer.Transform; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; /** @@ -50,7 +50,7 @@ * ParseContext) */ @Override - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { // create a the context for walking operators NodeProcessorCtx opPartWalkerCtx = new LBOpPartitionWalkerCtx(pctx); @@ -188,7 +188,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException { try { finalPaths = execute(ctx, part, pruner); - } catch (SemanticException e) { + } catch (HiveException e) { // Use full partition path for error case. 
LOG.warn("Using full partition scan :" + part.getPath() + ".", e); finalPaths = part.getPath(); @@ -204,10 +204,10 @@ public ParseContext transform(ParseContext pctx) throws SemanticException { * @param part * @param pruner * @return - * @throws SemanticException + * @throws HiveException */ private static Path[] execute(ParseContext ctx, Partition part, ExprNodeDesc pruner) - throws SemanticException { + throws HiveException { Path[] finalPaths; List selectedPaths = new ArrayList(); @@ -281,11 +281,11 @@ public ParseContext transform(ParseContext pctx) throws SemanticException { * @param collections * @param uniqSkewedValues * @return - * @throws SemanticException + * @throws HiveException */ private static List decideSkewedValueDirSelection(Partition part, ExprNodeDesc pruner, List selectedPaths, List> collections, - List> uniqSkewedValues) throws SemanticException { + List> uniqSkewedValues) throws HiveException { // For each entry in dynamic-multi-dimension collection. List skewedCols = part.getSkewedColNames(); // Retrieve skewed column. Map, String> mappings = part.getSkewedColValueLocationMaps(); // Retrieve skewed @@ -471,10 +471,10 @@ private static void decideDefaultDirSelection(Partition part, List selecte * It returns the complete collection * (1,a) , (1,b) , (1,c) , (1,other), (2,a), (2,b) , (2,c), (2,other), (other,a), (other,b), * (other,c), (other,other) - * @throws SemanticException + * @throws HiveException */ public static List> generateCollection(List> values) - throws SemanticException { + throws HiveException { // Calculate unique skewed elements for each skewed column. List> uniqSkewedElements = DynamicMultiDimensionalCollection.uniqueElementsList( values, ListBucketingPrunerUtils.HIVE_LIST_BUCKETING_DEFAULT_KEY); @@ -590,7 +590,7 @@ private static void decideDefaultDirSelection(Partition part, List selecte * @return */ public static List> flat(List> uniqSkewedElements) - throws SemanticException { + throws HiveException { if (uniqSkewedElements == null) { return null; } @@ -606,10 +606,10 @@ private static void decideDefaultDirSelection(Partition part, List selecte * @param input * @param listSoFar * @param level - * @throws SemanticException + * @throws HiveException */ private static void walker(List> finalResult, final List> input, - List listSoFar, final int level) throws SemanticException { + List listSoFar, final int level) throws HiveException { // Base case. 
if (level == (input.size() - 1)) { assert (input.get(level) != null) : "Unique skewed element list has null list in " + level diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPrunerUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPrunerUtils.java index ccb75eb..ef2a0a3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPrunerUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPrunerUtils.java @@ -20,6 +20,7 @@ import java.util.List; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -158,11 +159,11 @@ public static Boolean notBoolOperand(Boolean input) { * and or * / \ / \ * c1=1 c2=a c1=3 c2=b - * @throws SemanticException + * @throws HiveException * */ static Boolean evaluateExprOnCell(List skewedCols, List cell, - ExprNodeDesc pruner, List> uniqSkewedValues) throws SemanticException { + ExprNodeDesc pruner, List> uniqSkewedValues) throws HiveException { return recursiveExpr(pruner, skewedCols, cell, uniqSkewedValues); } @@ -174,11 +175,11 @@ static Boolean evaluateExprOnCell(List skewedCols, List cell, * @param skewedCols * @param cell * @return - * @throws SemanticException + * @throws HiveException */ private static Boolean recursiveExpr(final ExprNodeDesc node, final List skewedCols, final List cell, final List> uniqSkewedValues) - throws SemanticException { + throws HiveException { if (isUnknownState(node)) { return null; } @@ -209,10 +210,10 @@ private static Boolean recursiveExpr(final ExprNodeDesc node, final List * @param cell * @param uniqSkewedValues * @return - * @throws SemanticException + * @throws HiveException */ private static Boolean evaluateEqualNd(final ExprNodeDesc node, final List skewedCols, - final List cell, final List> uniqSkewedValues) throws SemanticException { + final List cell, final List> uniqSkewedValues) throws HiveException { Boolean result = null; List children = ((ExprNodeGenericFuncDesc) node).getChildren(); assert ((children != null) && (children.size() == 2)) : "GenericUDFOPEqual should have 2 " + @@ -238,11 +239,11 @@ private static Boolean evaluateEqualNd(final ExprNodeDesc node, final List skewedCols, final List cell, final List> uniqSkewedValues, Boolean result, - ExprNodeDesc left, ExprNodeDesc right) throws SemanticException { + ExprNodeDesc left, ExprNodeDesc right) throws HiveException { String columnNameInFilter = ((ExprNodeColumnDesc) left).getColumn(); String constantValueInFilter = ((ExprNodeConstantDesc) right).getValue().toString(); assert (skewedCols.contains(columnNameInFilter)) : "List bucketing pruner has a column name " @@ -298,7 +299,7 @@ private static Boolean coreComparisonInEqualNode(String constantValueInFilter, } private static Boolean evaluateNotNode(final ExprNodeDesc node, final List skewedCols, - final List cell, final List> uniqSkewedValues) throws SemanticException { + final List cell, final List> uniqSkewedValues) throws HiveException { List children = ((ExprNodeGenericFuncDesc) node).getChildren(); if ((children == null) || (children.size() != 1)) { throw new SemanticException("GenericUDFOPNot should have 1 ExprNodeDesc. 
Node name : " @@ -309,7 +310,7 @@ private static Boolean evaluateNotNode(final ExprNodeDesc node, final List skewedCols, - final List cell, final List> uniqSkewedValues) throws SemanticException { + final List cell, final List> uniqSkewedValues) throws HiveException { List children = ((ExprNodeGenericFuncDesc) node).getChildren(); if ((children == null) || (children.size() != 2)) { throw new SemanticException("GenericUDFOPOr should have 2 ExprNodeDesc. Node name : " @@ -322,7 +323,7 @@ private static Boolean evaluateOrNode(final ExprNodeDesc node, final List skewedCols, - final List cell, final List> uniqSkewedValues) throws SemanticException { + final List cell, final List> uniqSkewedValues) throws HiveException { List children = ((ExprNodeGenericFuncDesc) node).getChildren(); if ((children == null) || (children.size() != 2)) { throw new SemanticException("GenericUDFOPAnd should have 2 ExprNodeDesc. Node name : " diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PartitionConditionRemover.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PartitionConditionRemover.java index cbed375..41b3ede 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PartitionConditionRemover.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PartitionConditionRemover.java @@ -36,9 +36,9 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.Transform; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; /** * The transformation step that does partition condition remover. @@ -58,7 +58,7 @@ * .hive.ql.parse.ParseContext) */ @Override - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { // create a the context for walking operators List opToRemove = diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java index bd0e8c3..8272c51 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.metadata.VirtualColumn; import org.apache.hadoop.hive.ql.optimizer.ppr.PartExprEvalUtils; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; @@ -59,7 +58,7 @@ */ public final class PcrExprProcFactory { static Object evalExprWithPart(ExprNodeDesc expr, Partition p, List vcs) - throws SemanticException { + throws HiveException { StructObjectInspector rowObjectInspector; Table tbl = p.getTable(); LinkedHashMap partSpec = p.getSpec(); @@ -68,14 +67,10 @@ static Object evalExprWithPart(ExprNodeDesc expr, Partition p, List stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { ExprNodeColumnDesc cd = (ExprNodeColumnDesc) nd; PcrExprProcCtx epc = (PcrExprProcCtx) procCtx; if (cd.getTabAlias().equalsIgnoreCase(epc.getTabAlias()) @@ -230,7 +225,7 @@ public static ExprNodeGenericFuncDesc getOutExpr( public static class GenericFuncExprProcessor implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { PcrExprProcCtx ctx = (PcrExprProcCtx) procCtx; ExprNodeGenericFuncDesc fd = (ExprNodeGenericFuncDesc) nd; @@ -364,7 +359,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { ExprNodeFieldDesc fnd = (ExprNodeFieldDesc) nd; boolean unknown = false; for (Object child : nodeOutputs) { @@ -392,7 +387,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { if (nd instanceof ExprNodeConstantDesc || nd instanceof ExprNodeNullDesc) { return new NodeInfoWrapper(WalkState.CONSTANT, null, (ExprNodeDesc) nd); @@ -430,11 +425,11 @@ public static NodeProcessor getColumnProcessor() { * @param pred * expression tree of the target filter operator * @return the node information of the root expression - * @throws SemanticException + * @throws HiveException */ public static NodeInfoWrapper walkExprTree( String tabAlias, ArrayList parts, List vcs, ExprNodeDesc pred) - throws SemanticException { + throws HiveException { // Create the walker, the rules dispatcher and the context. PcrExprProcCtx pprCtx = new PcrExprProcCtx(tabAlias, parts, vcs); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrOpProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrOpProcFactory.java index c2d49f6..d600394 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrOpProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrOpProcFactory.java @@ -57,7 +57,7 @@ @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { PcrOpWalkerCtx owc = (PcrOpWalkerCtx) procCtx; FilterOperator fop = (FilterOperator) nd; FilterOperator fop2 = null; @@ -108,7 +108,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } catch (HiveException e) { // Has to use full name to make sure it does not conflict with // org.apache.commons.lang.StringUtils - throw new SemanticException(e.getMessage(), e); + throw e; } // Otherwise this is not a sampling predicate. We need to process it. @@ -151,7 +151,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { // Nothing needs to be done. 
return null; } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AbstractJoinTaskDispatcher.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AbstractJoinTaskDispatcher.java index 33ef581..21cf35d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AbstractJoinTaskDispatcher.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AbstractJoinTaskDispatcher.java @@ -33,8 +33,8 @@ import org.apache.hadoop.hive.ql.lib.Dispatcher; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.TaskGraphWalker.TaskGraphWalkerContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.ql.plan.MapWork; /** @@ -50,7 +50,7 @@ public AbstractJoinTaskDispatcher(PhysicalContext context) { public abstract Task processCurrentTask(MapRedTask currTask, ConditionalTask conditionalTask, Context context) - throws SemanticException; + throws HiveException; protected void replaceTaskWithConditionalTask( Task currTask, ConditionalTask cndTsk, @@ -122,7 +122,7 @@ protected void replaceTask( public long getTotalKnownInputSize(Context context, MapWork currWork, Map> pathToAliases, - HashMap aliasToSize) throws SemanticException { + HashMap aliasToSize) throws HiveException { try { // go over all the input paths, and calculate a known total size, known // size for each input alias. @@ -152,13 +152,13 @@ public long getTotalKnownInputSize(Context context, MapWork currWork, return aliasTotalKnownInputSize; } catch (Exception e) { e.printStackTrace(); - throw new SemanticException("Generate Map Join Task Error: " + e.getMessage()); + throw new SemanticException("Generate Map Join Task Error: " + e.getMessage(), e); } } @Override public Object dispatch(Node nd, Stack stack, Object... 
nodeOutputs) - throws SemanticException { + throws HiveException { if (nodeOutputs == null || nodeOutputs.length == 0) { throw new SemanticException("No Dispatch Context"); } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingInferenceOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingInferenceOptimizer.java index 87fba2d..db13cde 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingInferenceOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingInferenceOptimizer.java @@ -45,7 +45,7 @@ import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleExactMatch; import org.apache.hadoop.hive.ql.lib.RuleRegExp; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.OperatorDesc; /** @@ -64,7 +64,7 @@ public class BucketingSortingInferenceOptimizer implements PhysicalPlanResolver { @Override - public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { + public PhysicalContext resolve(PhysicalContext pctx) throws HiveException { inferBucketingSorting(Utilities.getMRTasks(pctx.rootTasks)); return pctx; } @@ -74,9 +74,9 @@ public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { * reducer is bucketed and/or sorted * * @param mapRedTasks - * @throws SemanticException + * @throws HiveException */ - private void inferBucketingSorting(List mapRedTasks) throws SemanticException { + private void inferBucketingSorting(List mapRedTasks) throws HiveException { for (ExecDriver mapRedTask : mapRedTasks) { // For now this only is used to determine the bucketing/sorting of outputs, in the future diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java index eac0edd..8a31d08 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java @@ -36,10 +36,10 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Utils; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol; import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketSortCol; import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.SortCol; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc.ExprNodeDescEqualityWrapper; @@ -54,7 +54,7 @@ @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { return null; } @@ -67,10 +67,10 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, * @param op * @param bctx * @param parent - * @throws SemanticException + * @throws HiveException */ private static void processForward(Operator op, BucketingSortingCtx bctx, - Operator parent) throws SemanticException { + Operator parent) throws HiveException { List bucketCols = bctx.getBucketedCols(parent); List sortCols = bctx.getSortedCols(parent); @@ -125,7 +125,7 @@ private static void processForward(Operator op, Bucketin public static class JoinInferrer extends DefaultInferrer implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { BucketingSortingCtx bctx = (BucketingSortingCtx)procCtx; JoinOperator jop = (JoinOperator)nd; @@ -326,7 +326,7 @@ private static void findBucketingSortingColumns(List exprs, public static class SelectInferrer extends DefaultInferrer implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { BucketingSortingCtx bctx = (BucketingSortingCtx)procCtx; SelectOperator sop = (SelectOperator)nd; @@ -450,7 +450,7 @@ private static int indexOfColName(List bucketSortCols, public static class FileSinkInferrer extends DefaultInferrer implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { BucketingSortingCtx bctx = (BucketingSortingCtx)procCtx; FileSinkOperator fop = (FileSinkOperator)nd; @@ -492,7 +492,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, public static class ExtractInferrer extends DefaultInferrer implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { BucketingSortingCtx bctx = (BucketingSortingCtx)procCtx; ExtractOperator exop = (ExtractOperator)nd; @@ -591,7 +591,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, public static class MultiGroupByInferrer extends GroupByInferrer implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { BucketingSortingCtx bctx = (BucketingSortingCtx)procCtx; GroupByOperator gop = (GroupByOperator)nd; @@ -633,7 +633,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, public static class GroupByInferrer extends DefaultInferrer implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { BucketingSortingCtx bctx = (BucketingSortingCtx)procCtx; GroupByOperator gop = (GroupByOperator)nd; @@ -726,7 +726,7 @@ protected Object processGroupBy(Operator parent, GroupBy @SuppressWarnings("unchecked") @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { processForward((Operator)nd, (BucketingSortingCtx)procCtx, getParent(stack)); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinResolver.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinResolver.java index 736df45..e3d7fea 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinResolver.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinResolver.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hive.ql.lib.Dispatcher; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.TaskGraphWalker; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; /* * Convert tasks involving JOIN into MAPJOIN. @@ -65,7 +65,7 @@ */ public class CommonJoinResolver implements PhysicalPlanResolver { @Override - public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { + public PhysicalContext resolve(PhysicalContext pctx) throws HiveException { // create dispatcher and graph walker Dispatcher disp = new CommonJoinTaskDispatcher(pctx); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java index 74ca355..1c6c652 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; import org.apache.hadoop.hive.ql.lib.Dispatcher; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils; import org.apache.hadoop.hive.ql.optimizer.MapJoinProcessor; import org.apache.hadoop.hive.ql.parse.ParseContext; @@ -181,7 +182,7 @@ private int getPosition(MapWork work, Operator joinOp, // create map join task and set big table as bigTablePosition private ObjectPair convertTaskToMapJoinTask(MapredWork newWork, - int bigTablePosition) throws UnsupportedEncodingException, SemanticException { + int bigTablePosition) throws UnsupportedEncodingException, HiveException { // create a mapred task for this work MapRedTask newTask = (MapRedTask) TaskFactory.get(newWork, physicalContext .getParseContext().getConf()); @@ -199,7 +200,7 @@ private int getPosition(MapWork work, Operator joinOp, * See if the two tasks can be merged. */ private void mergeMapJoinTaskIntoItsChildMapRedTask(MapRedTask mapJoinTask, Configuration conf) - throws SemanticException{ + throws HiveException { // Step 1: Check if mapJoinTask has a single child. // If so, check if we can merge mapJoinTask into that child. 
if (mapJoinTask.getChildTasks() == null @@ -393,7 +394,7 @@ public static boolean cannotConvert(String bigTableAlias, @Override public Task processCurrentTask(MapRedTask currTask, ConditionalTask conditionalTask, Context context) - throws SemanticException { + throws HiveException { // whether it contains common join op; if contains, return this common join op JoinOperator joinOp = getJoinOp(currTask); @@ -527,7 +528,7 @@ public static boolean cannotConvert(String bigTableAlias, } } catch (Exception e) { e.printStackTrace(); - throw new SemanticException("Generate Map Join Task Error: " + e.getMessage()); + throw new SemanticException("Generate Map Join Task Error: " + e.getMessage(), e); } // insert current common join task to conditional task @@ -580,7 +581,7 @@ private boolean checkOperatorOKMapJoinConversion(Operator currTask, ParseContext parseCtx) - throws SemanticException { + throws HiveException { // We are trying to adding map joins to handle skew keys, and map join right // now does not work with outer joins diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/IndexWhereResolver.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/IndexWhereResolver.java index 9caeba4..d4138de 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/IndexWhereResolver.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/IndexWhereResolver.java @@ -24,13 +24,13 @@ import org.apache.hadoop.hive.ql.lib.Dispatcher; import org.apache.hadoop.hive.ql.lib.GraphWalker; import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.physical.index.IndexWhereTaskDispatcher; -import org.apache.hadoop.hive.ql.parse.SemanticException; public class IndexWhereResolver implements PhysicalPlanResolver { @Override - public PhysicalContext resolve(PhysicalContext physicalContext) throws SemanticException { + public PhysicalContext resolve(PhysicalContext physicalContext) throws HiveException { Dispatcher dispatcher = new IndexWhereTaskDispatcher(physicalContext); GraphWalker opGraphWalker = new DefaultGraphWalker(dispatcher); ArrayList topNodes = new ArrayList(); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LocalMapJoinProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LocalMapJoinProcFactory.java index 5a53e15..5d29ef0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LocalMapJoinProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LocalMapJoinProcFactory.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.physical.MapJoinResolver.LocalMapJoinProcCtx; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.HashTableDummyDesc; @@ -71,7 +72,7 @@ public static NodeProcessor getDefaultProc() { return new NodeProcessor() { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { return null; } }; @@ -83,7 +84,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, */ public static class MapJoinFollowedByGroupByProcessor implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object... nodeOutputs) - throws SemanticException { + throws HiveException { LocalMapJoinProcCtx context = (LocalMapJoinProcCtx) ctx; if (!nd.getName().equals("GBY")) { return null; @@ -103,7 +104,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object.. */ public static class LocalMapJoinProcessor implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object... nodeOutputs) - throws SemanticException { + throws HiveException { LocalMapJoinProcCtx context = (LocalMapJoinProcCtx) ctx; if (!nd.getName().equals("MAPJOIN")) { return null; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MapJoinResolver.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MapJoinResolver.java index 010ac54..6af69ef 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MapJoinResolver.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MapJoinResolver.java @@ -44,8 +44,8 @@ import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; import org.apache.hadoop.hive.ql.lib.TaskGraphWalker; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ConditionalResolver; import org.apache.hadoop.hive.ql.plan.ConditionalResolverCommonJoin; import org.apache.hadoop.hive.ql.plan.ConditionalResolverCommonJoin.ConditionalResolverCommonJoinCtx; @@ -64,7 +64,7 @@ */ public class MapJoinResolver implements PhysicalPlanResolver { @Override - public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { + public PhysicalContext resolve(PhysicalContext pctx) throws HiveException { // create dispatcher and graph walker Dispatcher disp = new LocalMapJoinTaskDispatcher(pctx); @@ -94,7 +94,7 @@ public LocalMapJoinTaskDispatcher(PhysicalContext context) { } private void processCurrentTask(Task currTask, - ConditionalTask conditionalTask) throws SemanticException { + ConditionalTask conditionalTask) throws HiveException { // get current mapred work and its local work MapredWork mapredWork = (MapredWork) currTask.getWork(); MapredLocalWork localwork = mapredWork.getMapWork().getMapLocalWork(); @@ -214,7 +214,7 @@ private void processCurrentTask(Task currTask, @Override public Object dispatch(Node nd, Stack stack, Object... nodeOutputs) - throws SemanticException { + throws HiveException { Task currTask = (Task) nd; // not map reduce task or not conditional task, just skip if (currTask.isMapRedTask()) { @@ -235,7 +235,7 @@ public Object dispatch(Node nd, Stack stack, Object... 
nodeOutputs) // replace the map join operator to local_map_join operator in the operator tree private LocalMapJoinProcCtx adjustLocalTask(MapredLocalTask task) - throws SemanticException { + throws HiveException { LocalMapJoinProcCtx localMapJoinProcCtx = new LocalMapJoinProcCtx(task, physicalContext .getParseContext()); Map opRules = new LinkedHashMap(); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MetadataOnlyOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MetadataOnlyOptimizer.java index 7b09534..5581805 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MetadataOnlyOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MetadataOnlyOptimizer.java @@ -47,8 +47,8 @@ import org.apache.hadoop.hive.ql.lib.PreOrderWalker; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; @@ -128,7 +128,7 @@ public TableScanProcessor() { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { TableScanOperator node = (TableScanOperator) nd; TableScanOperator tsOp = (TableScanOperator) nd; WalkerCtx walkerCtx = (WalkerCtx) procCtx; @@ -147,7 +147,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, static private class FileSinkProcessor implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { WalkerCtx walkerCtx = (WalkerCtx) procCtx; // There can be atmost one element eligible to be converted to // metadata only @@ -173,7 +173,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } @Override - public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { + public PhysicalContext resolve(PhysicalContext pctx) throws HiveException { Dispatcher disp = new MetadataOnlyTaskDispatcher(pctx); GraphWalker ogw = new DefaultGraphWalker(disp); ArrayList topNodes = new ArrayList(); @@ -255,7 +255,7 @@ private String encode(Map partSpec) { @Override public Object dispatch(Node nd, Stack stack, Object... 
nodeOutputs) - throws SemanticException { + throws HiveException { Task task = (Task) nd; Collection> topOperators diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java index d593d08..83bd325 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.parse.SemanticException; /** * A hierarchy physical optimizer, which contains a list of @@ -95,7 +94,7 @@ private void initialize(HiveConf hiveConf) { * @return PhysicalContext * @throws HiveException */ - public PhysicalContext optimize() throws SemanticException { + public PhysicalContext optimize() throws HiveException { for (PhysicalPlanResolver r : resolvers) { pctx = r.resolve(pctx); } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalPlanResolver.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalPlanResolver.java index f1d4218..3dd57c7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalPlanResolver.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalPlanResolver.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.ql.optimizer.physical; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; /** * Physical plan optimization interface. Each resolver has its own set of @@ -32,6 +32,6 @@ * @param pctx * @return the physical plan */ - PhysicalContext resolve(PhysicalContext pctx) throws SemanticException; + PhysicalContext resolve(PhysicalContext pctx) throws HiveException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SamplingOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SamplingOptimizer.java index 2e1d15c..c9c5283 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SamplingOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SamplingOptimizer.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hive.ql.exec.TableScanOperator; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.ql.plan.ReduceWork; @@ -36,7 +36,7 @@ */ public class SamplingOptimizer implements PhysicalPlanResolver { - public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { + public PhysicalContext resolve(PhysicalContext pctx) throws HiveException { for (Task task : pctx.getRootTasks()) { if (!(task instanceof MapRedTask) || !((MapRedTask)task).getWork().isFinalMapRed()) { continue; // this could be replaced by bucketing on RS + bucketed fetcher for next MR diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinProcFactory.java index 58e373e..545abca 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinProcFactory.java @@ -26,9 +26,9 @@ import 
org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.physical.SkewJoinResolver.SkewJoinProcCtx; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; /** * Node processor factory for skew join resolver. @@ -49,7 +49,7 @@ public static NodeProcessor getJoinProc() { */ public static class SkewJoinJoinProcessor implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { SkewJoinProcCtx context = (SkewJoinProcCtx) ctx; JoinOperator op = (JoinOperator) nd; if (op.getConf().isFixedAsSorted()) { @@ -68,7 +68,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, */ public static class SkewJoinDefaultProcessor implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { return null; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java index f48d118..c257337 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java @@ -36,8 +36,8 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.MapredWork; /** @@ -47,7 +47,7 @@ */ public class SkewJoinResolver implements PhysicalPlanResolver { @Override - public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { + public PhysicalContext resolve(PhysicalContext pctx) throws HiveException { Dispatcher disp = new SkewJoinTaskDispatcher(pctx); GraphWalker ogw = new DefaultGraphWalker(disp); ArrayList topNodes = new ArrayList(); @@ -70,7 +70,7 @@ public SkewJoinTaskDispatcher(PhysicalContext context) { @Override public Object dispatch(Node nd, Stack stack, Object... 
nodeOutputs) - throws SemanticException { + throws HiveException { Task task = (Task) nd; if (!task.isMapRedTask() || task instanceof ConditionalTask diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinResolver.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinResolver.java index e9f0311..4fe35d6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinResolver.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinResolver.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hive.ql.lib.Dispatcher; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.TaskGraphWalker; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; /* * If a join has been automatically converted into a sort-merge join, create a conditional @@ -40,7 +40,7 @@ */ public class SortMergeJoinResolver implements PhysicalPlanResolver { @Override - public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { + public PhysicalContext resolve(PhysicalContext pctx) throws HiveException { // create dispatcher and graph walker Dispatcher disp = new SortMergeJoinTaskDispatcher(pctx); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java index f4cd3ab..e3b2bae 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; import org.apache.hadoop.hive.ql.lib.Dispatcher; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.MapJoinProcessor; import org.apache.hadoop.hive.ql.parse.OpParseContext; import org.apache.hadoop.hive.ql.parse.ParseContext; @@ -145,7 +146,7 @@ private void genSMBJoinWork(MapWork currWork, SMBMapJoinOperator smbJoinOp) { * plan is changed (aliasToWork etc.) to contain all the paths as if it was a regular join. 
*/ private MapredWork convertSMBWorkToJoinWork(MapredWork currWork, SMBMapJoinOperator oldSMBJoinOp) - throws SemanticException { + throws HiveException { try { // deep copy a new mapred work MapredWork currJoinWork = Utilities.clonePlan(currWork); @@ -160,7 +161,7 @@ private MapredWork convertSMBWorkToJoinWork(MapredWork currWork, SMBMapJoinOpera return currJoinWork; } catch (Exception e) { e.printStackTrace(); - throw new SemanticException("Generate Map Join Task Error: " + e.getMessage()); + throw new SemanticException("Generate Map Join Task Error: " + e.getMessage(), e); } } @@ -169,7 +170,7 @@ private MapredWork convertSMBWorkToJoinWork(MapredWork currWork, SMBMapJoinOpera int bigTablePosition, SMBMapJoinOperator smbJoinOp, QBJoinTree joinTree) - throws UnsupportedEncodingException, SemanticException { + throws UnsupportedEncodingException, HiveException { // deep copy a new mapred work MapredWork newWork = Utilities.clonePlan(origWork); // create a mapred task for this work @@ -234,7 +235,7 @@ private boolean isEligibleForOptimization(SMBMapJoinOperator originalSMBJoinOp) @Override public Task processCurrentTask(MapRedTask currTask, ConditionalTask conditionalTask, Context context) - throws SemanticException { + throws HiveException { // whether it contains a sort merge join operator MapredWork currWork = currTask.getWork(); SMBMapJoinOperator originalSMBJoinOp = getSMBMapJoinOp(currWork); @@ -407,7 +408,7 @@ private SMBMapJoinOperator getSMBMapJoinOp(Operator curr } } - private SMBMapJoinOperator getSMBMapJoinOp(MapredWork work) throws SemanticException { + private SMBMapJoinOperator getSMBMapJoinOp(MapredWork work) throws HiveException { if (work != null && work.getReduceWork() != null) { Operator reducer = work.getReduceWork().getReducer(); for (Operator op : work.getMapWork().getAliasToWork().values()) { @@ -424,7 +425,7 @@ private MapJoinOperator getMapJoinOperator(MapRedTask task, MapredWork work, SMBMapJoinOperator oldSMBJoinOp, QBJoinTree joinTree, - int mapJoinPos) throws SemanticException { + int mapJoinPos) throws HiveException { SMBMapJoinOperator newSMBJoinOp = getSMBMapJoinOp(task.getWork()); // Add the row resolver for the new operator diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/StageIDsRearranger.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/StageIDsRearranger.java index 9af3994..341dca7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/StageIDsRearranger.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/StageIDsRearranger.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.ConditionalTask; import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; /** * Simple renumbering of stage ids @@ -44,7 +44,7 @@ } @Override - public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { + public PhysicalContext resolve(PhysicalContext pctx) throws HiveException { int counter = 0; for (Task task : getExplainOrder(pctx)) { task.setId(PREFIX + (++counter)); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java index 32fd191..04f621e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java @@ -67,7 +67,6 @@ import 
org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.RowResolver; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.AbstractOperatorDesc; import org.apache.hadoop.hive.ql.plan.AggregationDesc; import org.apache.hadoop.hive.ql.plan.BaseWork; @@ -293,7 +292,7 @@ public VectorizationDispatcher(PhysicalContext pctx) { @Override public Object dispatch(Node nd, Stack stack, Object... nodeOutputs) - throws SemanticException { + throws HiveException { Task currTask = (Task) nd; if (currTask instanceof MapRedTask) { convertMapWork(((MapRedTask) currTask).getWork().getMapWork()); @@ -308,14 +307,14 @@ public Object dispatch(Node nd, Stack stack, Object... nodeOutputs) return null; } - private void convertMapWork(MapWork mapWork) throws SemanticException { + private void convertMapWork(MapWork mapWork) throws HiveException { boolean ret = validateMapWork(mapWork); if (ret) { vectorizeMapWork(mapWork); } } - private boolean validateMapWork(MapWork mapWork) throws SemanticException { + private boolean validateMapWork(MapWork mapWork) throws HiveException { // Validate the input format for (String path : mapWork.getPathToPartitionInfo().keySet()) { @@ -351,7 +350,7 @@ private boolean validateMapWork(MapWork mapWork) throws SemanticException { return true; } - private void vectorizeMapWork(MapWork mapWork) throws SemanticException { + private void vectorizeMapWork(MapWork mapWork) throws HiveException { LOG.info("Vectorizing task..."); mapWork.setVectorMode(true); Map opRules = new LinkedHashMap(); @@ -386,7 +385,7 @@ private void vectorizeMapWork(MapWork mapWork) throws SemanticException { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { for (Node n : stack) { Operator op = (Operator) n; if (op.getType().equals(OperatorType.REDUCESINK) && @@ -443,7 +442,7 @@ public VectorizationNodeProcessor(MapWork mWork) { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { Operator op = (Operator) nd; @@ -488,23 +487,19 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, opsDone.add(op); } } else { - try { - if (!opsDone.contains(op)) { - Operator vectorOp = - vectorizeOperator(op, vContext); - opsDone.add(op); - if (vectorOp != op) { - opsDone.add(vectorOp); - } - if (vectorOp instanceof VectorizationContextRegion) { - VectorizationContextRegion vcRegion = (VectorizationContextRegion) vectorOp; - VectorizationContext vOutContext = vcRegion.getOuputVectorizationContext(); - vContextsByTSOp.put(op, vOutContext); - vectorizationContexts.put(vOutContext.getFileKey(), vOutContext); - } + if (!opsDone.contains(op)) { + Operator vectorOp = + vectorizeOperator(op, vContext); + opsDone.add(op); + if (vectorOp != op) { + opsDone.add(vectorOp); + } + if (vectorOp instanceof VectorizationContextRegion) { + VectorizationContextRegion vcRegion = (VectorizationContextRegion) vectorOp; + VectorizationContext vOutContext = vcRegion.getOuputVectorizationContext(); + vContextsByTSOp.put(op, vOutContext); + vectorizationContexts.put(vOutContext.getFileKey(), vOutContext); } - } catch (HiveException e) { - throw new SemanticException(e); } } return null; @@ -528,7 +523,7 @@ protected int getInputColumnIndex(ExprNodeColumnDesc colExpr) { } @Override - public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { + public PhysicalContext resolve(PhysicalContext pctx) throws HiveException { this.physicalContext = pctx; boolean vectorPath = HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcessor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcessor.java index 5c6751c..675c782 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcessor.java @@ -76,7 +76,7 @@ public IndexWhereProcessor(Map> indexes) { * Process a node of the operator tree. This matches on the rule in IndexWhereTaskDispatcher */ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { TableScanOperator operator = (TableScanOperator) nd; List opChildren = operator.getChildren(); @@ -105,7 +105,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } } catch (HiveException e) { LOG.error("Fatal Error: problem accessing metastore", e); - throw new SemanticException(e); + throw e; } // we can only process MapReduce tasks to check input size @@ -191,7 +191,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, private void rewriteForIndexes(ExprNodeDesc predicate, List indexes, ParseContext pctx, Task task, HiveIndexQueryContext queryContext) - throws SemanticException { + throws HiveException { HiveIndexHandler indexHandler; // All indexes in the list are of the same type, and therefore can use the // same handler to generate the index query tasks @@ -212,7 +212,7 @@ private void rewriteForIndexes(ExprNodeDesc predicate, List indexes, return; } } catch (IOException e) { - throw new SemanticException("Failed to get task size", e); + throw new HiveException("Failed to get task size", e); } // use the IndexHandler to generate the index query diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java index ef86266..2513dbd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java @@ -41,11 +41,11 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.optimizer.IndexUtils; import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalContext; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.MapredWork; /** @@ -66,7 +66,7 @@ public IndexWhereTaskDispatcher(PhysicalContext context) { @Override public Object dispatch(Node nd, Stack stack, Object... nodeOutputs) - throws SemanticException { + throws HiveException { Task task = (Task) nd; @@ -107,7 +107,7 @@ public Object dispatch(Node nd, Stack stack, Object... nodeOutputs) * an index on. * @return */ - private Map createOperatorRules(ParseContext pctx) throws SemanticException { + private Map createOperatorRules(ParseContext pctx) throws HiveException { Map operatorRules = new LinkedHashMap(); List supportedIndexes = new ArrayList(); @@ -143,7 +143,7 @@ private NodeProcessor getDefaultProcessor() { return new NodeProcessor() { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { return null; } }; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/ExprProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/ExprProcFactory.java index 69fbddf..5179c10 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/ExprProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/ExprProcFactory.java @@ -23,9 +23,9 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.PrunerExpressionOperatorFactory; import org.apache.hadoop.hive.ql.optimizer.PrunerUtils; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; @@ -90,7 +90,7 @@ public static NodeProcessor getColumnProcessor() { * @return The pruner expression. */ public static ExprNodeDesc genPruner( - String tabAlias, ExprNodeDesc pred) throws SemanticException { + String tabAlias, ExprNodeDesc pred) throws HiveException { // Create the walker, the rules dispatcher and the context. ExprProcCtx pprCtx = new ExprProcCtx(tabAlias); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java index fd51628..f89aabc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java @@ -23,8 +23,8 @@ import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.PrunerOperatorFactory; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; /** @@ -52,7 +52,7 @@ @Override protected void generatePredicate(NodeProcessorCtx procCtx, FilterOperator fop, - TableScanOperator top) throws SemanticException, UDFArgumentException { + TableScanOperator top) throws HiveException, UDFArgumentException { OpWalkerCtx owc = (OpWalkerCtx) procCtx; // Otherwise this is not a sampling predicate and we need to ExprNodeDesc predicate = fop.getConf().getPredicate(); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java index 6bdf394..bb62922 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java @@ -76,7 +76,7 @@ * .hive.ql.parse.ParseContext) */ @Override - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { // create a the context for walking operators OpWalkerCtx opWalkerCtx = new OpWalkerCtx(pctx.getOpToPartPruner()); @@ -327,10 +327,8 @@ private static PrunedPartitionList getPartitionsFromServer(Table tab, boolean isPruningByExactFilter = oldFilter.equals(compactExpr.getExprString()); return new PrunedPartitionList(tab, new LinkedHashSet(partitions), hasUnknownPartitions || !isPruningByExactFilter); - } catch (HiveException e) { - throw e; } catch (Exception e) { - throw 
new HiveException(e); + throw HiveException.wrap(e); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/AnnotateWithStatistics.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/AnnotateWithStatistics.java index ccd102a..b6def76 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/AnnotateWithStatistics.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/AnnotateWithStatistics.java @@ -37,14 +37,14 @@ import org.apache.hadoop.hive.ql.lib.PreOrderWalker; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.Transform; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; public class AnnotateWithStatistics implements Transform { @Override - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { AnnotateStatsProcCtx aspCtx = new AnnotateStatsProcCtx(pctx); // create a walker which walks the tree in a DFS manner while maintaining the diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java index d03a760..3310559 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.AggregationDesc; @@ -89,16 +90,13 @@ @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { TableScanOperator tsop = (TableScanOperator) nd; AnnotateStatsProcCtx aspCtx = (AnnotateStatsProcCtx) procCtx; - PrunedPartitionList partList = null; - try { - partList = aspCtx.getParseContext().getPrunedPartitions(tsop.getName(), tsop); - } catch (HiveException e1) { - throw new SemanticException(e1); - } - Table table = aspCtx.getParseContext().getTopToTable().get(tsop); + + ParseContext pctx = aspCtx.getParseContext(); + PrunedPartitionList partList = pctx.getPrunedPartitions(tsop.getName(), tsop); + Table table = pctx.getTopToTable().get(tsop); // gather statistics for the first time and the attach it to table scan operator Statistics stats = StatsUtils.collectStatistics(aspCtx.getConf(), partList, table, tsop); @@ -134,7 +132,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { SelectOperator sop = (SelectOperator) nd; Operator parent = sop.getParentOperators().get(0); @@ -232,7 +230,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... 
nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { AnnotateStatsProcCtx aspCtx = (AnnotateStatsProcCtx) procCtx; FilterOperator fop = (FilterOperator) nd; Operator parent = fop.getParentOperators().get(0); @@ -545,7 +543,7 @@ private long evaluateChildExpr(Statistics stats, ExprNodeDesc child, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { GroupByOperator gop = (GroupByOperator) nd; Operator parent = gop.getParentOperators().get(0); Statistics parentStats = parent.getStatistics(); @@ -732,7 +730,7 @@ private long applyGBYRule(long numRows, long dvProd) { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { CommonJoinOperator jop = (CommonJoinOperator) nd; List> parents = jop.getParentOperators(); AnnotateStatsProcCtx aspCtx = (AnnotateStatsProcCtx) procCtx; @@ -955,7 +953,7 @@ private long getDenominator(List distinctVals) { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { LimitOperator lop = (LimitOperator) nd; Operator parent = lop.getParentOperators().get(0); Statistics parentStats = parent.getStatistics(); @@ -1017,7 +1015,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { Operator op = (Operator) nd; OperatorDesc conf = op.getConf(); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java index a985c4f..1f5559b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java @@ -32,8 +32,8 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext.UnionParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.FileSinkDesc; import org.apache.hadoop.hive.ql.plan.OperatorDesc; @@ -67,7 +67,7 @@ public static int getPositionParent(UnionOperator union, Stack stack) { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { UnionOperator union = (UnionOperator) nd; UnionProcContext ctx = (UnionProcContext) procCtx; @@ -92,7 +92,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { UnionOperator union = (UnionOperator) nd; UnionProcContext ctx = (UnionProcContext) procCtx; @@ -117,7 +117,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { UnionOperator union = (UnionOperator) nd; UnionProcContext ctx = (UnionProcContext) procCtx; @@ -174,7 +174,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, public static class UnionNoProcessFile implements NodeProcessor { private void pushOperatorsAboveUnion(UnionOperator union, - Stack stack, int pos) throws SemanticException { + Stack stack, int pos) throws HiveException { // Clone all the operators between union and filescan, and push them above // the union. Remove the union (the tree below union gets delinked after that) try { @@ -238,13 +238,13 @@ private void pushOperatorsAboveUnion(UnionOperator union, union.setChildOperators(null); union.setParentOperators(null); } catch (Exception e) { - throw new SemanticException(e.getMessage()); + throw HiveException.wrap(e); } } @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { FileSinkOperator fileSinkOp = (FileSinkOperator)nd; // Has this filesink already been processed @@ -303,7 +303,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { return null; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcessor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcessor.java index c973d98..5431111 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcessor.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hive.ql.lib.PreOrderWalker; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.Transform; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -65,7 +66,7 @@ public UnionProcessor() { * @param pCtx * the current parse context */ - public ParseContext transform(ParseContext pCtx) throws SemanticException { + public ParseContext transform(ParseContext pCtx) throws HiveException { // create a walker which walks the tree in a DFS manner while maintaining // the operator stack. 
Map opRules = new LinkedHashMap(); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/AbstractSemanticAnalyzerHook.java ql/src/java/org/apache/hadoop/hive/ql/parse/AbstractSemanticAnalyzerHook.java index 32b89a7..11ae1e0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/AbstractSemanticAnalyzerHook.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/AbstractSemanticAnalyzerHook.java @@ -22,16 +22,17 @@ import java.util.List; import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.metadata.HiveException; public abstract class AbstractSemanticAnalyzerHook implements HiveSemanticAnalyzerHook { public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context,ASTNode ast) - throws SemanticException { + throws HiveException { return ast; } public void postAnalyze(HiveSemanticAnalyzerHookContext context, - List> rootTasks) throws SemanticException { + List> rootTasks) throws HiveException { } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index 13d0a56..f2858a4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -21,8 +21,6 @@ import java.io.Serializable; import java.io.UnsupportedEncodingException; import java.sql.Date; -import java.text.DateFormat; -import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -49,7 +47,6 @@ import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluatorFactory; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.hooks.LineageInfo; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; @@ -146,7 +143,7 @@ String lineDelim = null; String nullFormat = null; - protected void analyzeRowFormat(AnalyzeCreateCommonVars shared, ASTNode child) throws SemanticException { + protected void analyzeRowFormat(AnalyzeCreateCommonVars shared, ASTNode child) throws HiveException { child = (ASTNode) child.getChild(0); int numChildRowFormat = child.getChildCount(); for (int numC = 0; numC < numChildRowFormat; numC++) { @@ -264,11 +261,11 @@ protected void fillDefaultStorageFormat(AnalyzeCreateCommonVars shared) { } } - public BaseSemanticAnalyzer(HiveConf conf) throws SemanticException { + public BaseSemanticAnalyzer(HiveConf conf) throws HiveException { this(conf, createHiveDB(conf)); } - public BaseSemanticAnalyzer(HiveConf conf, Hive db) throws SemanticException { + public BaseSemanticAnalyzer(HiveConf conf, Hive db) throws HiveException { try { this.conf = conf; this.db = db; @@ -279,23 +276,19 @@ public BaseSemanticAnalyzer(HiveConf conf, Hive db) throws SemanticException { inputs = new LinkedHashSet(); outputs = new LinkedHashSet(); } catch (Exception e) { - throw new SemanticException(e); + throw SemanticException.wrap(e); } } - protected static Hive createHiveDB(HiveConf conf) throws SemanticException { - try { - return Hive.get(conf); - } catch (HiveException e) { - throw new SemanticException(e); - } + protected static Hive createHiveDB(HiveConf conf) throws HiveException { + return Hive.get(conf); } public HashMap getIdToTableNameMap() { return idToTableNameMap; } - public abstract void analyzeInternal(ASTNode ast) throws SemanticException; + public abstract void analyzeInternal(ASTNode ast) throws HiveException; 
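// Editorial sketch, not part of this patch: several hunks in this change call
// SemanticException.wrap(e) and HiveException.wrap(e), but the helpers themselves are not
// shown in this section. Assuming they are added to the existing exception classes elsewhere
// in the change, a minimal implementation could look like the two methods below; only the
// method name comes from the call sites above, the bodies are an assumption.

// In org.apache.hadoop.hive.ql.metadata.HiveException:
public static HiveException wrap(Exception e) {
  // Avoid double-wrapping so a rethrown HiveException keeps its original type and message,
  // mirroring the removed "catch (HiveException e) { throw e; }" clause in PartitionPruner.
  return (e instanceof HiveException) ? (HiveException) e : new HiveException(e);
}

// In org.apache.hadoop.hive.ql.parse.SemanticException (a subclass of HiveException, which is
// why methods declared "throws HiveException" can still throw SemanticException directly):
public static SemanticException wrap(Exception e) {
  return (e instanceof SemanticException) ? (SemanticException) e : new SemanticException(e);
}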
public void init() { //no-op } @@ -304,13 +297,13 @@ public void initCtx(Context ctx) { this.ctx = ctx; } - public void analyze(ASTNode ast, Context ctx) throws SemanticException { + public void analyze(ASTNode ast, Context ctx) throws HiveException { initCtx(ctx); init(); analyzeInternal(ast); } - public void validate() throws SemanticException { + public void validate() throws HiveException { // Implementations may choose to override this } @@ -349,7 +342,7 @@ public static String stripQuotes(String val) { } public static String charSetString(String charSetName, String charSetString) - throws SemanticException { + throws HiveException { try { // The character set name starts with a _, so strip that charSetName = charSetName.substring(1); @@ -377,7 +370,7 @@ public static String charSetString(String charSetName, String charSetString) return res; } } catch (UnsupportedEncodingException e) { - throw new SemanticException(e); + throw HiveException.wrap(e); } } @@ -585,11 +578,11 @@ public static String unescapeSQLString(String b) { return null; } - protected List getColumns(ASTNode ast) throws SemanticException { + protected List getColumns(ASTNode ast) throws HiveException { return getColumns(ast, true); } - protected void handleGenericFileFormat(ASTNode node) throws SemanticException{ + protected void handleGenericFileFormat(ASTNode node) throws HiveException{ ASTNode child = (ASTNode)node.getChild(0); throw new SemanticException("Unrecognized file format in STORED AS clause:"+ @@ -599,7 +592,7 @@ protected void handleGenericFileFormat(ASTNode node) throws SemanticException{ /** * Get the list of FieldSchema out of the ASTNode. */ - public static List getColumns(ASTNode ast, boolean lowerCase) throws SemanticException { + public static List getColumns(ASTNode ast, boolean lowerCase) throws HiveException { List colList = new ArrayList(); int numCh = ast.getChildCount(); for (int i = 0; i < numCh; i++) { @@ -654,7 +647,7 @@ protected void handleGenericFileFormat(ASTNode node) throws SemanticException{ } protected static String getTypeStringFromAST(ASTNode typeNode) - throws SemanticException { + throws HiveException { switch (typeNode.getType()) { case HiveParser.TOK_LIST: return serdeConstants.LIST_TYPE_NAME + "<" @@ -673,7 +666,7 @@ protected static String getTypeStringFromAST(ASTNode typeNode) } private static String getStructTypeStringFromAST(ASTNode typeNode) - throws SemanticException { + throws HiveException { String typeStr = serdeConstants.STRUCT_TYPE_NAME + "<"; typeNode = (ASTNode) typeNode.getChild(0); int children = typeNode.getChildCount(); @@ -695,7 +688,7 @@ private static String getStructTypeStringFromAST(ASTNode typeNode) } private static String getUnionTypeStringFromAST(ASTNode typeNode) - throws SemanticException { + throws HiveException { String typeStr = serdeConstants.UNION_TYPE_NAME + "<"; typeNode = (ASTNode) typeNode.getChild(0); int children = typeNode.getChildCount(); @@ -729,12 +722,12 @@ private static String getUnionTypeStringFromAST(ASTNode typeNode) public SpecType specType; public tableSpec(Hive db, HiveConf conf, ASTNode ast) - throws SemanticException { + throws HiveException { this(db, conf, ast, true, false); } public tableSpec(Hive db, HiveConf conf, ASTNode ast, boolean allowDynamicPartitionsSpec, - boolean allowPartialPartitionsSpec) throws SemanticException { + boolean allowPartialPartitionsSpec) throws HiveException { assert (ast.getToken().getType() == HiveParser.TOK_TAB || ast.getToken().getType() == HiveParser.TOK_TABLE_PARTITION || 
ast.getToken().getType() == HiveParser.TOK_TABTYPE @@ -928,7 +921,7 @@ public void setColumnAccessInfo(ColumnAccessInfo columnAccessInfo) { } protected HashMap extractPartitionSpecs(Tree partspec) - throws SemanticException { + throws HiveException { HashMap partSpec = new LinkedHashMap(); for (int i = 0; i < partspec.getChildCount(); ++i) { CommonTree partspec_val = (CommonTree) partspec.getChild(i); @@ -985,7 +978,7 @@ final public boolean isValidPrefixSpec(Table tTable, Map spec) } private static void ErrorPartSpec(Map partSpec, - List parts) throws SemanticException { + List parts) throws HiveException { StringBuilder sb = new StringBuilder( "Partition columns in the table schema are: ("); @@ -1059,24 +1052,19 @@ protected ListBucketingCtx constructListBucketingCtx(List skewedColNames * * @param node * @return - * @throws SemanticException + * @throws HiveException */ - protected List getSkewedValuesFromASTNode(Node node) throws SemanticException { - List result = null; + protected List getSkewedValuesFromASTNode(Node node) throws HiveException { Tree leafVNode = ((ASTNode) node).getChild(0); if (leafVNode == null) { + throw new SemanticException(ErrorMsg.SKEWED_TABLE_NO_COLUMN_VALUE.getMsg()); + } + ASTNode lVAstNode = (ASTNode) leafVNode; + if (lVAstNode.getToken().getType() != HiveParser.TOK_TABCOLVALUE) { throw new SemanticException( ErrorMsg.SKEWED_TABLE_NO_COLUMN_VALUE.getMsg()); - } else { - ASTNode lVAstNode = (ASTNode) leafVNode; - if (lVAstNode.getToken().getType() != HiveParser.TOK_TABCOLVALUE) { - throw new SemanticException( - ErrorMsg.SKEWED_TABLE_NO_COLUMN_VALUE.getMsg()); - } else { - result = new ArrayList(getSkewedValueFromASTNode(lVAstNode)); - } } - return result; + return new ArrayList(getSkewedValueFromASTNode(lVAstNode)); } /** @@ -1085,22 +1073,19 @@ protected ListBucketingCtx constructListBucketingCtx(List skewedColNames * @param skewedColNames * @param child * @return - * @throws SemanticException + * @throws HiveException */ - protected List analyzeSkewedTablDDLColNames(List skewedColNames, ASTNode child) - throws SemanticException { - Tree nNode = child.getChild(0); + protected List analyzeSkewedTablDDLColNames(ASTNode child) + throws HiveException { + Tree nNode = child.getChild(0); if (nNode == null) { throw new SemanticException(ErrorMsg.SKEWED_TABLE_NO_COLUMN_NAME.getMsg()); - } else { - ASTNode nAstNode = (ASTNode) nNode; - if (nAstNode.getToken().getType() != HiveParser.TOK_TABCOLNAME) { - throw new SemanticException(ErrorMsg.SKEWED_TABLE_NO_COLUMN_NAME.getMsg()); - } else { - skewedColNames = getColumnNames(nAstNode); - } } - return skewedColNames; + ASTNode nAstNode = (ASTNode) nNode; + if (nAstNode.getToken().getType() != HiveParser.TOK_TABCOLNAME) { + throw new SemanticException(ErrorMsg.SKEWED_TABLE_NO_COLUMN_NAME.getMsg()); + } + return getColumnNames(nAstNode); } /** @@ -1110,10 +1095,10 @@ protected ListBucketingCtx constructListBucketingCtx(List skewedColNames * * @param skewedValues * @param child - * @throws SemanticException + * @throws HiveException */ protected void analyzeDDLSkewedValues(List> skewedValues, ASTNode child) - throws SemanticException { + throws HiveException { Tree vNode = child.getChild(1); if (vNode == null) { throw new SemanticException(ErrorMsg.SKEWED_TABLE_NO_COLUMN_VALUE.getMsg()); @@ -1159,7 +1144,7 @@ protected boolean analyzeStoredAdDirs(ASTNode child) { } private static boolean getPartExprNodeDesc(ASTNode astNode, - Map astExprNodeMap) throws SemanticException { + Map astExprNodeMap) throws HiveException { if 
(astNode == null) { return true; @@ -1188,7 +1173,7 @@ private static boolean getPartExprNodeDesc(ASTNode astNode, } public static void validatePartSpec(Table tbl, Map partSpec, - ASTNode astNode, HiveConf conf, boolean shouldBeFull) throws SemanticException { + ASTNode astNode, HiveConf conf, boolean shouldBeFull) throws HiveException { tbl.validatePartColumnNames(partSpec, shouldBeFull); if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_TYPE_CHECK_ON_INSERT)) { @@ -1222,15 +1207,8 @@ public static void validatePartSpec(Table tbl, Map partSpec, TypeInfoUtils.getTypeInfoFromTypeString(colType); ObjectInspector outputOI = TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(expectedType); - Object value = null; String colSpec = partSpec.get(astKeyName); - try { - value = - ExprNodeEvaluatorFactory.get(astExprNodePair.getValue()). - evaluate(colSpec); - } catch (HiveException e) { - throw new SemanticException(e); - } + Object value = ExprNodeEvaluatorFactory.get(astExprNodePair.getValue()).evaluate(colSpec); Object convertedValue = ObjectInspectorConverters.getConverter(inputOI, outputOI).convert(value); if (convertedValue == null) { @@ -1244,7 +1222,7 @@ public static void validatePartSpec(Table tbl, Map partSpec, @VisibleForTesting static void normalizeColSpec(Map partSpec, String colName, - String colType, String originalColSpec, Object colValue) throws SemanticException { + String colType, String originalColSpec, Object colValue) throws HiveException { if (colValue == null) return; // nothing to do with nulls String normalizedColSpec = originalColSpec; if (colType.equals(serdeConstants.DATE_TYPE_NAME)) { @@ -1258,7 +1236,7 @@ static void normalizeColSpec(Map partSpec, String colName, } private static String normalizeDateCol( - Object colValue, String originalColSpec) throws SemanticException { + Object colValue, String originalColSpec) throws HiveException { Date value; if (colValue instanceof DateWritable) { value = ((DateWritable) colValue).get(); @@ -1270,11 +1248,11 @@ private static String normalizeDateCol( return HiveMetaStore.PARTITION_DATE_FORMAT.format(value); } - protected Database getDatabase(String dbName) throws SemanticException { + protected Database getDatabase(String dbName) throws HiveException { return getDatabase(dbName, true); } - protected Database getDatabase(String dbName, boolean throwException) throws SemanticException { + protected Database getDatabase(String dbName, boolean throwException) throws HiveException { try { Database database = db.getDatabase(dbName); if (database == null && throwException) { @@ -1286,17 +1264,17 @@ protected Database getDatabase(String dbName, boolean throwException) throws Sem } } - protected Table getTable(String tblName) throws SemanticException { + protected Table getTable(String tblName) throws HiveException { return getTable(null, tblName, true); } - protected Table getTable(String tblName, boolean throwException) throws SemanticException { + protected Table getTable(String tblName, boolean throwException) throws HiveException { String currentDb = SessionState.get().getCurrentDatabase(); return getTable(currentDb, tblName, throwException); } // qnName : possibly contains database name (dot seperated) - protected Table getTableWithQN(String qnName, boolean throwException) throws SemanticException { + protected Table getTableWithQN(String qnName, boolean throwException) throws HiveException { int dot = qnName.indexOf('.'); if (dot < 0) { String currentDb = SessionState.get().getCurrentDatabase(); @@ -1306,7 +1284,7 
@@ protected Table getTableWithQN(String qnName, boolean throwException) throws Sem } protected Table getTable(String database, String tblName, boolean throwException) - throws SemanticException { + throws HiveException { try { Table tab = database == null ? db.getTable(tblName, false) : db.getTable(database, tblName, false); @@ -1320,7 +1298,7 @@ protected Table getTable(String database, String tblName, boolean throwException } protected Partition getPartition(Table table, Map partSpec, - boolean throwException) throws SemanticException { + boolean throwException) throws HiveException { try { Partition partition = db.getPartition(table, partSpec, false); if (partition == null && throwException) { @@ -1333,7 +1311,7 @@ protected Partition getPartition(Table table, Map partSpec, } protected List getPartitions(Table table, Map partSpec, - boolean throwException) throws SemanticException { + boolean throwException) throws HiveException { try { List partitions = partSpec == null ? db.getPartitions(table) : db.getPartitions(table, partSpec); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessAnalyzer.java index 74b595a..0565960 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessAnalyzer.java @@ -24,6 +24,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.exec.TableScanOperator; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; public class ColumnAccessAnalyzer { @@ -38,7 +39,7 @@ public ColumnAccessAnalyzer(ParseContext pactx) { pGraphContext = pactx; } - public ColumnAccessInfo analyzeColumnAccess() throws SemanticException { + public ColumnAccessInfo analyzeColumnAccess() throws HiveException { ColumnAccessInfo columnAccessInfo = new ColumnAccessInfo(); Map topOps = pGraphContext.getTopToTable(); for (TableScanOperator op : topOps.keySet()) { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java index 5b77e6f..32b3e0d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java @@ -117,7 +117,7 @@ public void setPartKeyType(String partKeyType, int index) { } } - public ColumnStatsSemanticAnalyzer(HiveConf conf) throws SemanticException { + public ColumnStatsSemanticAnalyzer(HiveConf conf) throws HiveException { super(conf); } @@ -246,7 +246,7 @@ private void validatePartitionKeys(String tableName, PartitionList partList) thr } private String constructPartitionName(String tableName, PartitionList partList) - throws SemanticException { + throws HiveException { Table tbl; Partition part; String[] partKeys = partList.getPartKeys(); @@ -287,7 +287,7 @@ private void validatePartitionClause(String tableName, PartitionList partList) t validatePartitionKeys(tableName, partList); } - private StringBuilder genPartitionClause(PartitionList partList) throws SemanticException { + private StringBuilder genPartitionClause(PartitionList partList) throws HiveException { StringBuilder whereClause = new StringBuilder(" where "); boolean predPresent = false; StringBuilder groupByClause = new StringBuilder(" group by "); @@ -342,7 +342,7 @@ private StringBuilder 
genPartitionClause(PartitionList partList) throws Semantic return retClause; } - private int getNumBitVectorsForNDVEstimation(HiveConf conf) throws SemanticException { + private int getNumBitVectorsForNDVEstimation(HiveConf conf) throws HiveException { int numBitVectors; float percentageError = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVE_STATS_NDV_ERROR); @@ -397,7 +397,7 @@ private int getNumBitVectorsForNDVEstimation(HiveConf conf) throws SemanticExcep } private List getTableColumnType(String tableName, List colNames, int numCols) - throws SemanticException{ + throws HiveException{ List colTypes = new LinkedList(); String colName; Table tbl; @@ -421,7 +421,7 @@ private int getNumBitVectorsForNDVEstimation(HiveConf conf) throws SemanticExcep } private List getPartitionColumnType(String tableName, String partName, - List colNames, int numCols) throws SemanticException { + List colNames, int numCols) throws HiveException { List colTypes = new LinkedList(); String colName; Table tbl; @@ -455,7 +455,7 @@ private int getNumBitVectorsForNDVEstimation(HiveConf conf) throws SemanticExcep } private String genRewrittenQuery(List colNames, int numBitVectors, PartitionList partList, - boolean isPartitionStats) throws SemanticException{ + boolean isPartitionStats) throws HiveException{ StringBuilder rewrittenQueryBuilder = new StringBuilder("select "); String rewrittenQuery; for (int i = 0; i < colNames.size(); i++) { @@ -483,7 +483,7 @@ private String genRewrittenQuery(List colNames, int numBitVectors, Parti return rewrittenQuery; } - private ASTNode genRewrittenTree(String rewrittenQuery) throws SemanticException { + private ASTNode genRewrittenTree(String rewrittenQuery) throws HiveException { ASTNode rewrittenTree; // Parse the rewritten query string try { @@ -503,7 +503,7 @@ private ASTNode genRewrittenTree(String rewrittenQuery) throws SemanticException return rewrittenTree; } - public ColumnStatsSemanticAnalyzer(HiveConf conf, ASTNode tree) throws SemanticException { + public ColumnStatsSemanticAnalyzer(HiveConf conf, ASTNode tree) throws HiveException { super(conf); // check if it is no scan. 
grammar prevents coexit noscan/columns super.processNoScanCommand(tree); @@ -548,7 +548,7 @@ public ColumnStatsSemanticAnalyzer(HiveConf conf, ASTNode tree) throws SemanticE // fail early if the columns specified for column statistics are not valid private void validateSpecifiedColumnNames(String tableName, List specifiedCols) - throws SemanticException { + throws HiveException { List fields = null; try { fields = db.getTable(tableName).getAllCols(); @@ -565,7 +565,7 @@ private void validateSpecifiedColumnNames(String tableName, List specifi } } - private List getPartitionKeys(String tableName) throws SemanticException { + private List getPartitionKeys(String tableName) throws HiveException { List fields; try { fields = db.getTable(tableName).getPartitionKeys(); @@ -577,7 +577,7 @@ private void validateSpecifiedColumnNames(String tableName, List specifi } private void checkForPartitionColumns(List specifiedCols, List partCols) - throws SemanticException { + throws HiveException { // Raise error if user has specified partition column for stats for (String pc : partCols) { for (String sc : specifiedCols) { @@ -590,7 +590,7 @@ private void checkForPartitionColumns(List specifiedCols, List p } @Override - public void analyze(ASTNode ast, Context origCtx) throws SemanticException { + public void analyze(ASTNode ast, Context origCtx) throws HiveException { QB qb; QBParseInfo qbp; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 81426cd..7185280 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -158,7 +158,7 @@ TokenToTypeName.put(HiveParser.TOK_DECIMAL, serdeConstants.DECIMAL_TYPE_NAME); } - public static String getTypeName(ASTNode node) throws SemanticException { + public static String getTypeName(ASTNode node) throws HiveException { int token = node.getType(); String typeName; @@ -193,7 +193,7 @@ public static String getTypeName(ASTNode node) throws SemanticException { public TablePartition() { } - public TablePartition(ASTNode tblPart) throws SemanticException { + public TablePartition(ASTNode tblPart) throws HiveException { tableName = unescapeIdentifier(tblPart.getChild(0).getText()); if (tblPart.getChildCount() > 1) { ASTNode part = (ASTNode) tblPart.getChild(1); @@ -204,11 +204,11 @@ public TablePartition(ASTNode tblPart) throws SemanticException { } } - public DDLSemanticAnalyzer(HiveConf conf) throws SemanticException { + public DDLSemanticAnalyzer(HiveConf conf) throws HiveException { this(conf, createHiveDB(conf)); } - public DDLSemanticAnalyzer(HiveConf conf, Hive db) throws SemanticException { + public DDLSemanticAnalyzer(HiveConf conf, Hive db) throws HiveException { super(conf, db); reservedPartitionValues = new HashSet(); // Partition can't have this name @@ -222,7 +222,7 @@ public DDLSemanticAnalyzer(HiveConf conf, Hive db) throws SemanticException { } @Override - public void analyzeInternal(ASTNode ast) throws SemanticException { + public void analyzeInternal(ASTNode ast) throws HiveException { switch (ast.getToken().getType()) { case HiveParser.TOK_ALTERTABLE_PARTITION: { @@ -457,7 +457,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { } } - private void analyzeGrantRevokeRole(boolean grant, ASTNode ast) throws SemanticException { + private void analyzeGrantRevokeRole(boolean grant, ASTNode ast) throws HiveException { Task task; if(grant) { task 
= hiveAuthorizationTaskFactory.createGrantRoleTask(ast, getInputs(), getOutputs()); @@ -469,7 +469,7 @@ private void analyzeGrantRevokeRole(boolean grant, ASTNode ast) throws SemanticE } } - private void analyzeShowGrant(ASTNode ast) throws SemanticException { + private void analyzeShowGrant(ASTNode ast) throws HiveException { Task task = hiveAuthorizationTaskFactory. createShowGrantTask(ast, ctx.getResFile(), getInputs(), getOutputs()); if(task != null) { @@ -478,7 +478,7 @@ private void analyzeShowGrant(ASTNode ast) throws SemanticException { } } - private void analyzeGrant(ASTNode ast) throws SemanticException { + private void analyzeGrant(ASTNode ast) throws HiveException { Task task = hiveAuthorizationTaskFactory. createGrantTask(ast, getInputs(), getOutputs()); if(task != null) { @@ -486,7 +486,7 @@ private void analyzeGrant(ASTNode ast) throws SemanticException { } } - private void analyzeRevoke(ASTNode ast) throws SemanticException { + private void analyzeRevoke(ASTNode ast) throws HiveException { Task task = hiveAuthorizationTaskFactory. createRevokeTask(ast, getInputs(), getOutputs()); if(task != null) { @@ -494,7 +494,7 @@ private void analyzeRevoke(ASTNode ast) throws SemanticException { } } - private void analyzeCreateRole(ASTNode ast) throws SemanticException { + private void analyzeCreateRole(ASTNode ast) throws HiveException { Task task = hiveAuthorizationTaskFactory. createCreateRoleTask(ast, getInputs(), getOutputs()); if(task != null) { @@ -502,7 +502,7 @@ private void analyzeCreateRole(ASTNode ast) throws SemanticException { } } - private void analyzeDropRole(ASTNode ast) throws SemanticException { + private void analyzeDropRole(ASTNode ast) throws HiveException { Task task = hiveAuthorizationTaskFactory. createDropRoleTask(ast, getInputs(), getOutputs()); if(task != null) { @@ -510,7 +510,7 @@ private void analyzeDropRole(ASTNode ast) throws SemanticException { } } - private void analyzeShowRoleGrant(ASTNode ast) throws SemanticException { + private void analyzeShowRoleGrant(ASTNode ast) throws HiveException { Task task = hiveAuthorizationTaskFactory. createShowRoleGrantTask(ast, ctx.getResFile(), getInputs(), getOutputs()); if(task != null) { @@ -528,7 +528,7 @@ private void analyzeShowRoles(ASTNode ast) { setFetchTask(createFetchTask(RoleDDLDesc.getSchema())); } - private void analyzeAlterDatabase(ASTNode ast) throws SemanticException { + private void analyzeAlterDatabase(ASTNode ast) throws HiveException { String dbName = unescapeIdentifier(ast.getChild(0).getText()); Map dbProps = null; @@ -552,7 +552,7 @@ private void analyzeAlterDatabase(ASTNode ast) throws SemanticException { } - private void analyzeExchangePartition(ASTNode ast) throws SemanticException { + private void analyzeExchangePartition(ASTNode ast) throws HiveException { Table sourceTable = getTable(getUnescapedName((ASTNode)ast.getChild(0))); Table destTable = getTable(getUnescapedName((ASTNode)ast.getChild(2))); @@ -578,7 +578,7 @@ private void analyzeExchangePartition(ASTNode ast) throws SemanticException { List destPartitions = null; try { destPartitions = getPartitions(destTable, partSpecs, true); - } catch (SemanticException ex) { + } catch (HiveException ex) { // We should expect a semantic exception being throw as this partition // should not be present. 
} @@ -615,7 +615,7 @@ private boolean isPartitionValueContinuous(List partitionKeys, return true; } - private void analyzeCreateDatabase(ASTNode ast) throws SemanticException { + private void analyzeCreateDatabase(ASTNode ast) throws HiveException { String dbName = unescapeIdentifier(ast.getChild(0).getText()); boolean ifNotExists = false; String dbComment = null; @@ -652,7 +652,7 @@ private void analyzeCreateDatabase(ASTNode ast) throws SemanticException { createDatabaseDesc), conf)); } - private void analyzeDropDatabase(ASTNode ast) throws SemanticException { + private void analyzeDropDatabase(ASTNode ast) throws HiveException { String dbName = unescapeIdentifier(ast.getChild(0).getText()); boolean ifExists = false; boolean ifCascade = false; @@ -687,7 +687,7 @@ private void analyzeSwitchDatabase(ASTNode ast) { private void analyzeDropTable(ASTNode ast, boolean expectView) - throws SemanticException { + throws HiveException { String tableName = getUnescapedName((ASTNode) ast.getChild(0)); boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null); // we want to signal an error if the table/view doesn't exist and we're @@ -706,7 +706,7 @@ private void analyzeDropTable(ASTNode ast, boolean expectView) dropTblDesc), conf)); } - private void analyzeTruncateTable(ASTNode ast) throws SemanticException { + private void analyzeTruncateTable(ASTNode ast) throws HiveException { ASTNode root = (ASTNode) ast.getChild(0); // TOK_TABLE_PARTITION String tableName = getUnescapedName((ASTNode) root.getChild(0)); @@ -748,138 +748,134 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException { // Is this a truncate column command List columnNames = null; if (ast.getChildCount() == 2) { - try { - columnNames = getColumnNames((ASTNode)ast.getChild(1)); - - // Throw an error if the table is indexed - List indexes = db.getIndexes(table.getDbName(), tableName, (short)1); - if (indexes != null && indexes.size() > 0) { - throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_INDEXED_TABLE.getMsg()); - } - - List bucketCols = null; - Class inputFormatClass = null; - boolean isArchived = false; - Path newTblPartLoc = null; - Path oldTblPartLoc = null; - List cols = null; - ListBucketingCtx lbCtx = null; - boolean isListBucketed = false; - List listBucketColNames = null; + columnNames = getColumnNames((ASTNode)ast.getChild(1)); - if (table.isPartitioned()) { - Partition part = db.getPartition(table, partSpec, false); - - Path tabPath = table.getPath(); - Path partPath = part.getDataLocation(); + // Throw an error if the table is indexed + List indexes = db.getIndexes(table.getDbName(), tableName, (short)1); + if (indexes != null && indexes.size() > 0) { + throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_INDEXED_TABLE.getMsg()); + } - // if the table is in a different dfs than the partition, - // replace the partition's dfs with the table's dfs. 
- newTblPartLoc = new Path(tabPath.toUri().getScheme(), tabPath.toUri() - .getAuthority(), partPath.toUri().getPath()); + List bucketCols = null; + Class inputFormatClass = null; + boolean isArchived = false; + Path newTblPartLoc = null; + Path oldTblPartLoc = null; + List cols = null; + ListBucketingCtx lbCtx = null; + boolean isListBucketed = false; + List listBucketColNames = null; + + if (table.isPartitioned()) { + Partition part = db.getPartition(table, partSpec, false); + + Path tabPath = table.getPath(); + Path partPath = part.getDataLocation(); + + // if the table is in a different dfs than the partition, + // replace the partition's dfs with the table's dfs. + newTblPartLoc = new Path(tabPath.toUri().getScheme(), tabPath.toUri() + .getAuthority(), partPath.toUri().getPath()); + + oldTblPartLoc = partPath; + + cols = part.getCols(); + bucketCols = part.getBucketCols(); + inputFormatClass = part.getInputFormatClass(); + isArchived = ArchiveUtils.isArchived(part); + lbCtx = constructListBucketingCtx(part.getSkewedColNames(), part.getSkewedColValues(), + part.getSkewedColValueLocationMaps(), part.isStoredAsSubDirectories(), conf); + isListBucketed = part.isStoredAsSubDirectories(); + listBucketColNames = part.getSkewedColNames(); + } else { + // input and output are the same + oldTblPartLoc = table.getPath(); + newTblPartLoc = table.getPath(); + cols = table.getCols(); + bucketCols = table.getBucketCols(); + inputFormatClass = table.getInputFormatClass(); + lbCtx = constructListBucketingCtx(table.getSkewedColNames(), table.getSkewedColValues(), + table.getSkewedColValueLocationMaps(), table.isStoredAsSubDirectories(), conf); + isListBucketed = table.isStoredAsSubDirectories(); + listBucketColNames = table.getSkewedColNames(); + } - oldTblPartLoc = partPath; + // throw a HiveException for non-rcfile. + if (!inputFormatClass.equals(RCFileInputFormat.class)) { + throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_NOT_RC.getMsg()); + } - cols = part.getCols(); - bucketCols = part.getBucketCols(); - inputFormatClass = part.getInputFormatClass(); - isArchived = ArchiveUtils.isArchived(part); - lbCtx = constructListBucketingCtx(part.getSkewedColNames(), part.getSkewedColValues(), - part.getSkewedColValueLocationMaps(), part.isStoredAsSubDirectories(), conf); - isListBucketed = part.isStoredAsSubDirectories(); - listBucketColNames = part.getSkewedColNames(); - } else { - // input and output are the same - oldTblPartLoc = table.getPath(); - newTblPartLoc = table.getPath(); - cols = table.getCols(); - bucketCols = table.getBucketCols(); - inputFormatClass = table.getInputFormatClass(); - lbCtx = constructListBucketingCtx(table.getSkewedColNames(), table.getSkewedColValues(), - table.getSkewedColValueLocationMaps(), table.isStoredAsSubDirectories(), conf); - isListBucketed = table.isStoredAsSubDirectories(); - listBucketColNames = table.getSkewedColNames(); - } + // throw a HiveException if the table/partition is archived + if (isArchived) { + throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_ARCHIVED.getMsg()); + } - // throw a HiveException for non-rcfile. 
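Most of the +/- churn in this hunk is re-indentation: the truncate-column branch of analyzeTruncateTable used to sit inside try { ... } catch (HiveException e) { throw new SemanticException(e); }, and with the method now declared as throws HiveException the wrapper is dropped and the body shifts left. A condensed sketch of that before/after shape, assuming only the two signatures; inspectTable() is a hypothetical stand-in for the db.getIndexes()/db.getPartition() calls in the real method:

    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.parse.SemanticException;

    // Condensed sketch of the wrapper removal; only the exception types are Hive's.
    public class TruncateWrapperSketch {

      // Old shape: every metastore call had to be wrapped so the method could keep
      // its "throws SemanticException" signature.
      void analyzeOld() throws SemanticException {
        try {
          inspectTable();
        } catch (HiveException e) {
          throw new SemanticException(e);
        }
      }

      // New shape: the signature already declares HiveException, so the call
      // propagates directly and one level of nesting disappears.
      void analyzeNew() throws HiveException {
        inspectTable();
      }

      // Hypothetical stand-in for the metastore lookups.
      void inspectTable() throws HiveException {
      }
    }

One behavioural consequence worth noting in review: callers that previously always saw a SemanticException from this path now receive the underlying HiveException unwrapped.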
- if (!inputFormatClass.equals(RCFileInputFormat.class)) { - throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_NOT_RC.getMsg()); + Set columnIndexes = new HashSet(); + for (String columnName : columnNames) { + boolean found = false; + for (int columnIndex = 0; columnIndex < cols.size(); columnIndex++) { + if (columnName.equalsIgnoreCase(cols.get(columnIndex).getName())) { + columnIndexes.add(columnIndex); + found = true; + break; + } } - - // throw a HiveException if the table/partition is archived - if (isArchived) { - throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_ARCHIVED.getMsg()); + // Throw an exception if the user is trying to truncate a column which doesn't exist + if (!found) { + throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(columnName)); } - - Set columnIndexes = new HashSet(); - for (String columnName : columnNames) { - boolean found = false; - for (int columnIndex = 0; columnIndex < cols.size(); columnIndex++) { - if (columnName.equalsIgnoreCase(cols.get(columnIndex).getName())) { - columnIndexes.add(columnIndex); - found = true; - break; - } + // Throw an exception if the table/partition is bucketed on one of the columns + for (String bucketCol : bucketCols) { + if (bucketCol.equalsIgnoreCase(columnName)) { + throw new SemanticException(ErrorMsg.TRUNCATE_BUCKETED_COLUMN.getMsg(columnName)); } - // Throw an exception if the user is trying to truncate a column which doesn't exist - if (!found) { - throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(columnName)); - } - // Throw an exception if the table/partition is bucketed on one of the columns - for (String bucketCol : bucketCols) { - if (bucketCol.equalsIgnoreCase(columnName)) { - throw new SemanticException(ErrorMsg.TRUNCATE_BUCKETED_COLUMN.getMsg(columnName)); - } - } - if (isListBucketed) { - for (String listBucketCol : listBucketColNames) { - if (listBucketCol.equalsIgnoreCase(columnName)) { - throw new SemanticException( - ErrorMsg.TRUNCATE_LIST_BUCKETED_COLUMN.getMsg(columnName)); - } + } + if (isListBucketed) { + for (String listBucketCol : listBucketColNames) { + if (listBucketCol.equalsIgnoreCase(columnName)) { + throw new SemanticException( + ErrorMsg.TRUNCATE_LIST_BUCKETED_COLUMN.getMsg(columnName)); } } } + } - truncateTblDesc.setColumnIndexes(new ArrayList(columnIndexes)); - - truncateTblDesc.setInputDir(oldTblPartLoc); - addInputsOutputsAlterTable(tableName, partSpec); - - truncateTblDesc.setLbCtx(lbCtx); - - addInputsOutputsAlterTable(tableName, partSpec); - ddlWork.setNeedLock(true); - TableDesc tblDesc = Utilities.getTableDesc(table); - // Write the output to temporary directory and move it to the final location at the end - // so the operation is atomic. - Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc.toUri()); - truncateTblDesc.setOutputDir(queryTmpdir); - LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, - partSpec == null ? 
new HashMap() : partSpec); - ltd.setLbCtx(lbCtx); - Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), - conf); - truncateTask.addDependentTask(moveTsk); - - // Recalculate the HDFS stats if auto gather stats is set - if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { - StatsWork statDesc; - if (oldTblPartLoc.equals(newTblPartLoc)) { - // If we're merging to the same location, we can avoid some metastore calls - tableSpec tablepart = new tableSpec(this.db, conf, root); - statDesc = new StatsWork(tablepart); - } else { - statDesc = new StatsWork(ltd); - } - statDesc.setNoStatsAggregator(true); - statDesc.setClearAggregatorStats(true); - statDesc.setStatsReliable(conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE)); - Task statTask = TaskFactory.get(statDesc, conf); - moveTsk.addDependentTask(statTask); + truncateTblDesc.setColumnIndexes(new ArrayList(columnIndexes)); + + truncateTblDesc.setInputDir(oldTblPartLoc); + addInputsOutputsAlterTable(tableName, partSpec); + + truncateTblDesc.setLbCtx(lbCtx); + + addInputsOutputsAlterTable(tableName, partSpec); + ddlWork.setNeedLock(true); + TableDesc tblDesc = Utilities.getTableDesc(table); + // Write the output to temporary directory and move it to the final location at the end + // so the operation is atomic. + Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc.toUri()); + truncateTblDesc.setOutputDir(queryTmpdir); + LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, + partSpec == null ? new HashMap() : partSpec); + ltd.setLbCtx(lbCtx); + Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), + conf); + truncateTask.addDependentTask(moveTsk); + + // Recalculate the HDFS stats if auto gather stats is set + if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { + StatsWork statDesc; + if (oldTblPartLoc.equals(newTblPartLoc)) { + // If we're merging to the same location, we can avoid some metastore calls + tableSpec tablepart = new tableSpec(this.db, conf, root); + statDesc = new StatsWork(tablepart); + } else { + statDesc = new StatsWork(ltd); } - } catch (HiveException e) { - throw new SemanticException(e); + statDesc.setNoStatsAggregator(true); + statDesc.setClearAggregatorStats(true); + statDesc.setStatsReliable(conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE)); + Task statTask = TaskFactory.get(statDesc, conf); + moveTsk.addDependentTask(statTask); } } @@ -895,7 +891,7 @@ private boolean isFullSpec(Table table, Map partSpec) { return true; } - private void analyzeCreateIndex(ASTNode ast) throws SemanticException { + private void analyzeCreateIndex(ASTNode ast) throws HiveException { String indexName = unescapeIdentifier(ast.getChild(0).getText()); String typeName = unescapeSQLString(ast.getChild(1).getText()); String tableName = getUnescapedName((ASTNode) ast.getChild(2)); @@ -977,7 +973,7 @@ private void analyzeCreateIndex(ASTNode ast) throws SemanticException { rootTasks.add(createIndex); } - private void analyzeDropIndex(ASTNode ast) throws SemanticException { + private void analyzeDropIndex(ASTNode ast) throws HiveException { String indexName = unescapeIdentifier(ast.getChild(0).getText()); String tableName = getUnescapedName((ASTNode) ast.getChild(1)); boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null); @@ -998,7 +994,7 @@ private void analyzeDropIndex(ASTNode ast) throws SemanticException { dropIdxDesc), conf)); } - private void analyzeAlterIndexRebuild(ASTNode ast) throws SemanticException { + private void 
analyzeAlterIndexRebuild(ASTNode ast) throws HiveException { String baseTableName = unescapeIdentifier(ast.getChild(0).getText()); String indexName = unescapeIdentifier(ast.getChild(1).getText()); HashMap partSpec = null; @@ -1023,7 +1019,7 @@ private void analyzeAlterIndexRebuild(ASTNode ast) throws SemanticException { } private void analyzeAlterIndexProps(ASTNode ast) - throws SemanticException { + throws HiveException { String baseTableName = getUnescapedName((ASTNode) ast.getChild(0)); String indexName = unescapeIdentifier(ast.getChild(1).getText()); @@ -1041,7 +1037,7 @@ private void analyzeAlterIndexProps(ASTNode ast) } private List> getIndexBuilderMapRed(String baseTableName, String indexName, - HashMap partSpec) throws SemanticException { + HashMap partSpec) throws HiveException { try { String dbName = SessionState.get().getCurrentDatabase(); Index index = db.getIndex(dbName, baseTableName, indexName); @@ -1064,7 +1060,7 @@ private void analyzeAlterIndexProps(ASTNode ast) index, indexTblPartitions, baseTblPartitions, indexTbl, getInputs(), getOutputs()); return ret; } catch (Exception e) { - throw new SemanticException(e); + throw HiveException.wrap(e); } } @@ -1107,12 +1103,12 @@ private void analyzeAlterIndexProps(ASTNode ast) return baseTblPartitions; } - private void validateAlterTableType(Table tbl, AlterTableTypes op) throws SemanticException { + private void validateAlterTableType(Table tbl, AlterTableTypes op) throws HiveException { validateAlterTableType(tbl, op, false); } private void validateAlterTableType(Table tbl, AlterTableTypes op, boolean expectView) - throws SemanticException { + throws HiveException { if (tbl.isView()) { if (!expectView) { throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_VIEWS.getMsg()); @@ -1141,7 +1137,7 @@ private void validateAlterTableType(Table tbl, AlterTableTypes op, boolean expec } private void analyzeAlterTableProps(ASTNode ast, boolean expectView, boolean isUnset) - throws SemanticException { + throws HiveException { String tableName = getUnescapedName((ASTNode) ast.getChild(0)); HashMap mapProp = getProps((ASTNode) (ast.getChild(1)) @@ -1166,7 +1162,7 @@ private void analyzeAlterTableProps(ASTNode ast, boolean expectView, boolean isU private void analyzeAlterTableSerdeProps(ASTNode ast, String tableName, HashMap partSpec) - throws SemanticException { + throws HiveException { HashMap mapProp = getProps((ASTNode) (ast.getChild(0)) .getChild(0)); AlterTableDesc alterTblDesc = new AlterTableDesc( @@ -1182,7 +1178,7 @@ private void analyzeAlterTableSerdeProps(ASTNode ast, String tableName, private void analyzeAlterTableSerde(ASTNode ast, String tableName, HashMap partSpec) - throws SemanticException { + throws HiveException { String serdeName = unescapeSQLString(ast.getChild(0).getText()); AlterTableDesc alterTblDesc = new AlterTableDesc(AlterTableTypes.ADDSERDE); @@ -1202,7 +1198,7 @@ private void analyzeAlterTableSerde(ASTNode ast, String tableName, private void analyzeAlterTableFileFormat(ASTNode ast, String tableName, HashMap partSpec) - throws SemanticException { + throws HiveException { String inputFormat = null; String outputFormat = null; @@ -1220,7 +1216,7 @@ private void analyzeAlterTableFileFormat(ASTNode ast, String tableName, Class.forName(inputFormat); Class.forName(outputFormat); } catch (ClassNotFoundException e) { - throw new SemanticException(e); + throw HiveException.wrap(e); } break; case HiveParser.TOK_STORAGEHANDLER: @@ -1229,7 +1225,7 @@ private void analyzeAlterTableFileFormat(ASTNode ast, String tableName, 
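Several call sites in this region (getIndexBuilderMapRed above, the ClassNotFoundException branches in analyzeAlterTableFileFormat, analyzeAlterTablePartMergeFiles, validateSkewedLocationString, and MapReduceCompiler.decideExecMode later on) switch from throw new SemanticException(e) to throw HiveException.wrap(e). That static helper is not defined anywhere in this excerpt, so it is presumably added to HiveException elsewhere in the patch. The following is only a guess at its contract, offered as an assumption consistent with the call sites, not as the patch's actual code:

    import org.apache.hadoop.hive.ql.metadata.HiveException;

    // Hypothetical sketch of a wrap() helper: return HiveExceptions unchanged and
    // wrap anything else once, using the standard Throwable-cause constructor.
    public final class HiveExceptionWrapSketch {

      private HiveExceptionWrapSketch() {
      }

      public static HiveException wrap(Throwable t) {
        if (t instanceof HiveException) {
          return (HiveException) t;   // avoid double wrapping
        }
        return new HiveException(t);
      }
    }

If the helper has this shape, a call site reads catch (ClassNotFoundException e) { throw HiveException.wrap(e); }, exactly as in the surrounding hunks.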
try { Class.forName(storageHandler); } catch (ClassNotFoundException e) { - throw new SemanticException(e); + throw HiveException.wrap(e); } break; case HiveParser.TOK_TBLSEQUENCEFILE: @@ -1264,12 +1260,12 @@ private void analyzeAlterTableFileFormat(ASTNode ast, String tableName, } private void addInputsOutputsAlterTable(String tableName, Map partSpec) - throws SemanticException { + throws HiveException { addInputsOutputsAlterTable(tableName, partSpec, null); } private void addInputsOutputsAlterTable(String tableName, Map partSpec, - AlterTableDesc desc) throws SemanticException { + AlterTableDesc desc) throws HiveException { Table tab = getTable(tableName, true); if (partSpec == null || partSpec.isEmpty()) { inputs.add(new ReadEntity(tab)); @@ -1310,7 +1306,7 @@ private void addInputsOutputsAlterTable(String tableName, Map pa } private void analyzeAlterTableLocation(ASTNode ast, String tableName, - HashMap partSpec) throws SemanticException { + HashMap partSpec) throws HiveException { String newLocation = unescapeSQLString(ast.getChild(0).getText()); @@ -1323,7 +1319,7 @@ private void analyzeAlterTableLocation(ASTNode ast, String tableName, private void analyzeAlterTableProtectMode(ASTNode ast, String tableName, HashMap partSpec) - throws SemanticException { + throws HiveException { AlterTableDesc alterTblDesc = new AlterTableDesc(AlterTableTypes.ALTERPROTECTMODE); @@ -1373,7 +1369,7 @@ private void analyzeAlterTableProtectMode(ASTNode ast, String tableName, private void analyzeAlterTablePartMergeFiles(ASTNode tablePartAST, ASTNode ast, String tableName, HashMap partSpec) - throws SemanticException { + throws HiveException { AlterTablePartMergeFilesDesc mergeDesc = new AlterTablePartMergeFilesDesc( tableName, partSpec); @@ -1495,12 +1491,12 @@ private void analyzeAlterTablePartMergeFiles(ASTNode tablePartAST, ASTNode ast, rootTasks.add(mergeTask); } catch (Exception e) { - throw new SemanticException(e); + throw HiveException.wrap(e); } } private void analyzeAlterTableClusterSort(ASTNode ast, String tableName, - HashMap partSpec) throws SemanticException { + HashMap partSpec) throws HiveException { addInputsOutputsAlterTable(tableName, partSpec); AlterTableDesc alterTblDesc; @@ -1638,7 +1634,7 @@ static public String getDBName(Hive db, ASTNode ast) { // get Table Name static public String getTableName(Hive db, ASTNode ast) - throws SemanticException { + throws HiveException { String tableName = null; String fullyQualifiedName = getFullyQualifiedName(ast); @@ -1717,7 +1713,7 @@ static public String getColPath( // get partition metadata static public Map getPartitionSpec(Hive db, ASTNode ast, String tableName) - throws SemanticException { + throws HiveException { // if ast has two children // it could be DESCRIBE table key // or DESCRIBE table partition @@ -1727,7 +1723,7 @@ static public String getColPath( HashMap partSpec = null; try { partSpec = getPartSpec(partNode); - } catch (SemanticException e) { + } catch (HiveException e) { // get exception in resolving partition // it could be DESCRIBE table key // return null @@ -1795,7 +1791,7 @@ private FetchTask createFetchTask(String schema) { return (FetchTask) TaskFactory.get(fetch, conf); } - private void validateDatabase(String databaseName) throws SemanticException { + private void validateDatabase(String databaseName) throws HiveException { try { if (!db.databaseExists(databaseName)) { throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(databaseName)); @@ -1806,14 +1802,14 @@ private void validateDatabase(String 
databaseName) throws SemanticException { } private void validateTable(String tableName, Map partSpec) - throws SemanticException { + throws HiveException { Table tab = getTable(tableName); if (partSpec != null) { getPartition(tab, partSpec, true); } } - private void analyzeDescribeTable(ASTNode ast) throws SemanticException { + private void analyzeDescribeTable(ASTNode ast) throws HiveException { ASTNode tableTypeExpr = (ASTNode) ast.getChild(0); String qualifiedName = @@ -1857,9 +1853,9 @@ private void analyzeDescribeTable(ASTNode ast) throws SemanticException { * Describe database. * * @param ast - * @throws SemanticException + * @throws HiveException */ - private void analyzeDescDatabase(ASTNode ast) throws SemanticException { + private void analyzeDescDatabase(ASTNode ast) throws HiveException { boolean isExtended; String dbName; @@ -1881,7 +1877,7 @@ private void analyzeDescDatabase(ASTNode ast) throws SemanticException { } public static HashMap getPartSpec(ASTNode partspec) - throws SemanticException { + throws HiveException { if (partspec == null) { return null; } @@ -1898,7 +1894,7 @@ private void analyzeDescDatabase(ASTNode ast) throws SemanticException { return partSpec; } - private void analyzeShowPartitions(ASTNode ast) throws SemanticException { + private void analyzeShowPartitions(ASTNode ast) throws HiveException { ShowPartitionsDesc showPartsDesc; String tableName = getUnescapedName((ASTNode) ast.getChild(0)); List> partSpecs = getPartitionSpecs(ast); @@ -1917,7 +1913,7 @@ private void analyzeShowPartitions(ASTNode ast) throws SemanticException { setFetchTask(createFetchTask(showPartsDesc.getSchema())); } - private void analyzeShowCreateTable(ASTNode ast) throws SemanticException { + private void analyzeShowCreateTable(ASTNode ast) throws HiveException { ShowCreateTableDesc showCreateTblDesc; String tableName = getUnescapedName((ASTNode)ast.getChild(0)); showCreateTblDesc = new ShowCreateTableDesc(tableName, ctx.getResFile().toString()); @@ -1933,7 +1929,7 @@ private void analyzeShowCreateTable(ASTNode ast) throws SemanticException { setFetchTask(createFetchTask(showCreateTblDesc.getSchema())); } - private void analyzeShowDatabases(ASTNode ast) throws SemanticException { + private void analyzeShowDatabases(ASTNode ast) throws HiveException { ShowDatabasesDesc showDatabasesDesc; if (ast.getChildCount() == 1) { String databasePattern = unescapeSQLString(ast.getChild(0).getText()); @@ -1945,7 +1941,7 @@ private void analyzeShowDatabases(ASTNode ast) throws SemanticException { setFetchTask(createFetchTask(showDatabasesDesc.getSchema())); } - private void analyzeShowTables(ASTNode ast) throws SemanticException { + private void analyzeShowTables(ASTNode ast) throws HiveException { ShowTablesDesc showTblsDesc; String dbName = SessionState.get().getCurrentDatabase(); String tableNames = null; @@ -1982,7 +1978,7 @@ private void analyzeShowTables(ASTNode ast) throws SemanticException { setFetchTask(createFetchTask(showTblsDesc.getSchema())); } - private void analyzeShowColumns(ASTNode ast) throws SemanticException { + private void analyzeShowColumns(ASTNode ast) throws HiveException { ShowColumnsDesc showColumnsDesc; String dbName = null; String tableName = null; @@ -2007,7 +2003,7 @@ private void analyzeShowColumns(ASTNode ast) throws SemanticException { setFetchTask(createFetchTask(showColumnsDesc.getSchema())); } - private void analyzeShowTableStatus(ASTNode ast) throws SemanticException { + private void analyzeShowTableStatus(ASTNode ast) throws HiveException { 
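The validateDatabase hunk above shows the pattern the widened signatures are meant to enable: user-facing problems are still raised as a SemanticException built from an ErrorMsg, while metastore failures can propagate as HiveException instead of being re-wrapped. A stand-alone sketch of that split, assuming only the exception types and the ErrorMsg.DATABASE_NOT_EXISTS message seen in the hunk; the MetastoreLookup interface is a hypothetical stand-in for the analyzer's db handle, not a Hive API:

    import org.apache.hadoop.hive.ql.ErrorMsg;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.parse.SemanticException;

    // Sketch only: user errors become SemanticException, metastore errors propagate.
    public class ValidateDatabaseSketch {

      // Hypothetical stand-in for the analyzer's Hive metastore handle.
      interface MetastoreLookup {
        boolean databaseExists(String name) throws HiveException;
      }

      void validateDatabase(MetastoreLookup db, String databaseName) throws HiveException {
        if (!db.databaseExists(databaseName)) {      // HiveException passes through untouched
          throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(databaseName));
        }
      }
    }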
ShowTableStatusDesc showTblStatusDesc; String tableNames = getUnescapedName((ASTNode) ast.getChild(0)); String dbName = SessionState.get().getCurrentDatabase(); @@ -2040,7 +2036,7 @@ private void analyzeShowTableStatus(ASTNode ast) throws SemanticException { setFetchTask(createFetchTask(showTblStatusDesc.getSchema())); } - private void analyzeShowTableProperties(ASTNode ast) throws SemanticException { + private void analyzeShowTableProperties(ASTNode ast) throws HiveException { ShowTblPropertiesDesc showTblPropertiesDesc; String tableNames = getUnescapedName((ASTNode) ast.getChild(0)); String dbName = SessionState.get().getCurrentDatabase(); @@ -2058,7 +2054,7 @@ private void analyzeShowTableProperties(ASTNode ast) throws SemanticException { setFetchTask(createFetchTask(showTblPropertiesDesc.getSchema())); } - private void analyzeShowIndexes(ASTNode ast) throws SemanticException { + private void analyzeShowIndexes(ASTNode ast) throws HiveException { ShowIndexesDesc showIndexesDesc; String tableName = getUnescapedName((ASTNode) ast.getChild(0)); showIndexesDesc = new ShowIndexesDesc(tableName, ctx.getResFile()); @@ -2079,10 +2075,10 @@ private void analyzeShowIndexes(ASTNode ast) throws SemanticException { * * @param ast * The parsed command tree. - * @throws SemanticException + * @throws HiveException * Parsin failed */ - private void analyzeShowFunctions(ASTNode ast) throws SemanticException { + private void analyzeShowFunctions(ASTNode ast) throws HiveException { ShowFunctionsDesc showFuncsDesc; if (ast.getChildCount() == 1) { String funcNames = stripQuotes(ast.getChild(0).getText()); @@ -2101,10 +2097,10 @@ private void analyzeShowFunctions(ASTNode ast) throws SemanticException { * * @param ast * The parsed command tree. - * @throws SemanticException + * @throws HiveException * Parsing failed */ - private void analyzeShowLocks(ASTNode ast) throws SemanticException { + private void analyzeShowLocks(ASTNode ast) throws HiveException { String tableName = null; HashMap partSpec = null; boolean isExtended = false; @@ -2144,10 +2140,10 @@ private void analyzeShowLocks(ASTNode ast) throws SemanticException { * * @param ast * The parsed command tree. - * @throws SemanticException + * @throws HiveException * Parsing failed */ - private void analyzeShowDbLocks(ASTNode ast) throws SemanticException { + private void analyzeShowDbLocks(ASTNode ast) throws HiveException { boolean isExtended = (ast.getChildCount() > 1); String dbName = stripQuotes(ast.getChild(0).getText()); @@ -2167,11 +2163,11 @@ private void analyzeShowDbLocks(ASTNode ast) throws SemanticException { * * @param ast * The parsed command tree. - * @throws SemanticException + * @throws HiveException * Parsing failed */ private void analyzeLockTable(ASTNode ast) - throws SemanticException { + throws HiveException { String tableName = getUnescapedName((ASTNode) ast.getChild(0)).toLowerCase(); String mode = unescapeIdentifier(ast.getChild(1).getText().toUpperCase()); List> partSpecs = getPartitionSpecs(ast); @@ -2199,11 +2195,11 @@ private void analyzeLockTable(ASTNode ast) * * @param ast * The parsed command tree. 
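A reviewer-side reminder that applies to all of the show/describe helpers in this region: once a method's throws clause is widened from SemanticException to HiveException, handling only the subclass no longer satisfies the compiler, so every caller up the chain has to be widened in the same change (which is what the later hunks in this patch do). A stand-alone illustration, where analyze() is a hypothetical stand-in and not a Hive API:

    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.parse.SemanticException;

    // Sketch of the knock-on effect for callers of a widened method.
    public class CallerImpactSketch {

      // Hypothetical stand-in for any analyze* method touched by this patch.
      static void analyze(String query) throws HiveException {
        if (query.isEmpty()) {
          throw new SemanticException("empty query");
        }
      }

      public static void main(String[] args) {
        try {
          analyze("show tables");
        } catch (SemanticException se) {
          // Still reachable, but on its own it would leave HiveException unhandled.
          System.out.println("semantic error: " + se.getMessage());
        } catch (HiveException he) {
          // Required now that analyze() declares the broader type.
          System.out.println("hive error: " + he.getMessage());
        }
      }
    }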
- * @throws SemanticException + * @throws HiveException * Parsing failed */ private void analyzeUnlockTable(ASTNode ast) - throws SemanticException { + throws HiveException { String tableName = getUnescapedName((ASTNode) ast.getChild(0)); List> partSpecs = getPartitionSpecs(ast); @@ -2222,7 +2218,7 @@ private void analyzeUnlockTable(ASTNode ast) ctx.setNeedLockMgr(true); } - private void analyzeLockDatabase(ASTNode ast) throws SemanticException { + private void analyzeLockDatabase(ASTNode ast) throws HiveException { String dbName = unescapeIdentifier(ast.getChild(0).getText()); String mode = unescapeIdentifier(ast.getChild(1).getText().toUpperCase()); @@ -2236,7 +2232,7 @@ private void analyzeLockDatabase(ASTNode ast) throws SemanticException { ctx.setNeedLockMgr(true); } - private void analyzeUnlockDatabase(ASTNode ast) throws SemanticException { + private void analyzeUnlockDatabase(ASTNode ast) throws HiveException { String dbName = unescapeIdentifier(ast.getChild(0).getText()); UnlockDatabaseDesc unlockDatabaseDesc = new UnlockDatabaseDesc(dbName); @@ -2252,10 +2248,10 @@ private void analyzeUnlockDatabase(ASTNode ast) throws SemanticException { * * @param ast * The parsed command tree. - * @throws SemanticException + * @throws HiveException * Parsing failed */ - private void analyzeDescFunction(ASTNode ast) throws SemanticException { + private void analyzeDescFunction(ASTNode ast) throws HiveException { String funcName; boolean isExtended; @@ -2277,7 +2273,7 @@ private void analyzeDescFunction(ASTNode ast) throws SemanticException { } - private void analyzeAlterTableRename(ASTNode ast, boolean expectView) throws SemanticException { + private void analyzeAlterTableRename(ASTNode ast, boolean expectView) throws HiveException { String tblName = getUnescapedName((ASTNode) ast.getChild(0)); AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, getUnescapedName((ASTNode) ast.getChild(1)), expectView); @@ -2287,7 +2283,7 @@ private void analyzeAlterTableRename(ASTNode ast, boolean expectView) throws Sem alterTblDesc), conf)); } - private void analyzeAlterTableRenameCol(ASTNode ast) throws SemanticException { + private void analyzeAlterTableRenameCol(ASTNode ast) throws HiveException { String tblName = getUnescapedName((ASTNode) ast.getChild(0)); String newComment = null; String newType = null; @@ -2338,7 +2334,7 @@ private void analyzeAlterTableRenameCol(ASTNode ast) throws SemanticException { } private void analyzeAlterTableRenamePart(ASTNode ast, String tblName, - HashMap oldPartSpec) throws SemanticException { + HashMap oldPartSpec) throws HiveException { Map newPartSpec = extractPartitionSpecs((ASTNode) ast.getChild(0)); if (newPartSpec == null) { throw new SemanticException("RENAME PARTITION Missing Destination" + ast); @@ -2358,7 +2354,7 @@ private void analyzeAlterTableRenamePart(ASTNode ast, String tblName, } private void analyzeAlterTableBucketNum(ASTNode ast, String tblName, - HashMap partSpec) throws SemanticException { + HashMap partSpec) throws HiveException { Table tab = getTable(tblName, true); if (tab.getBucketCols() == null || tab.getBucketCols().isEmpty()) { throw new SemanticException(ErrorMsg.ALTER_BUCKETNUM_NONBUCKETIZED_TBL.getMsg()); @@ -2374,7 +2370,7 @@ private void analyzeAlterTableBucketNum(ASTNode ast, String tblName, } private void analyzeAlterTableModifyCols(ASTNode ast, - AlterTableTypes alterType) throws SemanticException { + AlterTableTypes alterType) throws HiveException { String tblName = getUnescapedName((ASTNode) ast.getChild(0)); List newCols = 
getColumns((ASTNode) ast.getChild(1)); AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, newCols, @@ -2386,7 +2382,7 @@ private void analyzeAlterTableModifyCols(ASTNode ast, } private void analyzeAlterTableDropParts(ASTNode ast, boolean expectView) - throws SemanticException { + throws HiveException { String tblName = getUnescapedName((ASTNode) ast.getChild(0)); // get table metadata @@ -2434,7 +2430,7 @@ private void analyzeAlterTableDropParts(ASTNode ast, boolean expectView) } private void analyzeAlterTableAlterParts(ASTNode ast) - throws SemanticException { + throws HiveException { // get table name String tblName = getUnescapedName((ASTNode)ast.getChild(0)); @@ -2496,11 +2492,11 @@ private void analyzeAlterTableAlterParts(ASTNode ast) * @param expectView * True for ALTER VIEW, false for ALTER TABLE. * - * @throws SemanticException + * @throws HiveException * Parsing failed */ private void analyzeAlterTableAddParts(CommonTree ast, boolean expectView) - throws SemanticException { + throws HiveException { // ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists? alterStatementSuffixAddPartitionsElement+) String tblName = getUnescapedName((ASTNode)ast.getChild(0)); @@ -2593,7 +2589,7 @@ private void analyzeAlterTableAddParts(CommonTree ast, boolean expectView) } private Partition getPartitionForOutput(Table tab, Map currentPart) - throws SemanticException { + throws HiveException { validatePartitionValues(currentPart); try { Partition partition = db.getPartition(tab, currentPart, false); @@ -2614,11 +2610,11 @@ private Partition getPartitionForOutput(Table tab, Map currentPa * * @param ast * The parsed command tree. - * @throws SemanticException + * @throws HiveException * Parsin failed */ private void analyzeAlterTableTouch(CommonTree ast) - throws SemanticException { + throws HiveException { String tblName = getUnescapedName((ASTNode)ast.getChild(0)); Table tab = getTable(tblName, true); @@ -2648,7 +2644,7 @@ private void analyzeAlterTableTouch(CommonTree ast) } private void analyzeAlterTableArchive(CommonTree ast, boolean isUnArchive) - throws SemanticException { + throws HiveException { if (!conf.getBoolVar(HiveConf.ConfVars.HIVEARCHIVEENABLED)) { throw new SemanticException(ErrorMsg.ARCHIVE_METHODS_DISABLED.getMsg()); @@ -2692,9 +2688,9 @@ private void analyzeAlterTableArchive(CommonTree ast, boolean isUnArchive) * * @param ast * Query tree. - * @throws SemanticException + * @throws HiveException */ - private void analyzeMetastoreCheck(CommonTree ast) throws SemanticException { + private void analyzeMetastoreCheck(CommonTree ast) throws HiveException { String tableName = null; boolean repair = false; if (ast.getChildCount() > 0) { @@ -2718,10 +2714,10 @@ private void analyzeMetastoreCheck(CommonTree ast) throws SemanticException { * @param ast * Tree to extract partitions from. * @return A list of partition name to value mappings. - * @throws SemanticException + * @throws HiveException */ private List> getPartitionSpecs(CommonTree ast) - throws SemanticException { + throws HiveException { List> partSpecs = new ArrayList>(); int childIndex = 0; // get partition metadata if partition specified @@ -2743,10 +2739,10 @@ private void analyzeMetastoreCheck(CommonTree ast) throws SemanticException { * Tree to extract partitions from. * @return A list of PartitionSpec objects which contain the mapping from * key to operator and value. 
- * @throws SemanticException + * @throws HiveException */ private List getFullPartitionSpecs(CommonTree ast) - throws SemanticException { + throws HiveException { List partSpecList = new ArrayList(); for (int childIndex = 1; childIndex < ast.getChildCount(); childIndex++) { @@ -2778,7 +2774,7 @@ private void analyzeMetastoreCheck(CommonTree ast) throws SemanticException { * since the reserved names are fairly long and uncommon. */ private void validatePartitionValues(Map partSpec) - throws SemanticException { + throws HiveException { for (Entry e : partSpec.entrySet()) { for (String s : reservedPartitionValues) { @@ -2795,7 +2791,7 @@ private void validatePartitionValues(Map partSpec) * pre-execution hook. If the partition does not exist, no error is thrown. */ private void addTablePartsOutputs(String tblName, List> partSpecs) - throws SemanticException { + throws HiveException { addTablePartsOutputs(tblName, partSpecs, false, false, null); } @@ -2805,7 +2801,7 @@ private void addTablePartsOutputs(String tblName, List> part */ private void addTablePartsOutputs(String tblName, List> partSpecs, boolean allowMany) - throws SemanticException { + throws HiveException { addTablePartsOutputs(tblName, partSpecs, false, allowMany, null); } @@ -2816,7 +2812,7 @@ private void addTablePartsOutputs(String tblName, List> part */ private void addTablePartsOutputs(String tblName, List> partSpecs, boolean throwIfNonExistent, boolean allowMany, ASTNode ast) - throws SemanticException { + throws HiveException { Table tab = getTable(tblName); Iterator> i; @@ -2859,7 +2855,7 @@ private void addTablePartsOutputs(String tblName, List> part */ private void addTableDropPartsOutputs(String tblName, List partSpecs, boolean throwIfNonExistent, boolean stringPartitionColumns, boolean ignoreProtection) - throws SemanticException { + throws HiveException { Table tab = getTable(tblName); Iterator i; @@ -2902,9 +2898,9 @@ private void addTableDropPartsOutputs(String tblName, List partSp * * @param ast * node - * @throws SemanticException + * @throws HiveException */ - private void analyzeAltertableSkewedby(ASTNode ast) throws SemanticException { + private void analyzeAltertableSkewedby(ASTNode ast) throws HiveException { /** * Throw an error if the user tries to use the DDL with * hive.internal.ddl.list.bucketing.enable set to false. @@ -2945,10 +2941,10 @@ private void analyzeAltertableSkewedby(ASTNode ast) throws SemanticException { * * @param tableName * @param tab - * @throws SemanticException + * @throws HiveException */ private void handleAlterTableDisableStoredAsDirs(String tableName, Table tab) - throws SemanticException { + throws HiveException { List skewedColNames = tab.getSkewedColNames(); List> skewedColValues = tab.getSkewedColValues(); if ((skewedColNames == null) || (skewedColNames.size() == 0) || (skewedColValues == null) @@ -2967,15 +2963,14 @@ private void handleAlterTableDisableStoredAsDirs(String tableName, Table tab) * @param ast * @param tableName * @param tab - * @throws SemanticException + * @throws HiveException */ private void handleAlterTableSkewedBy(ASTNode ast, String tableName, Table tab) - throws SemanticException { - List skewedColNames = new ArrayList(); + throws HiveException { List> skewedValues = new ArrayList>(); /* skewed column names. */ ASTNode skewedNode = (ASTNode) ast.getChild(1); - skewedColNames = analyzeSkewedTablDDLColNames(skewedColNames, skewedNode); + List skewedColNames = analyzeSkewedTablDDLColNames(skewedNode); /* skewed value. 
*/ analyzeDDLSkewedValues(skewedValues, skewedNode); // stored as directories @@ -3000,10 +2995,10 @@ private void handleAlterTableSkewedBy(ASTNode ast, String tableName, Table tab) * @param skewedColNames * @param child * @return - * @throws SemanticException + * @throws HiveException */ private List analyzeAlterTableSkewedColNames(List skewedColNames, - ASTNode child) throws SemanticException { + ASTNode child) throws HiveException { Tree nNode = child.getChild(0); if (nNode == null) { throw new SemanticException(ErrorMsg.SKEWED_TABLE_NO_COLUMN_NAME.getMsg()); @@ -3045,10 +3040,10 @@ private void handleAlterTableSkewedBy(ASTNode ast, String tableName, Table tab) * @param ast * @param tableName * @param partSpec - * @throws SemanticException + * @throws HiveException */ private void analyzeAlterTableSkewedLocation(ASTNode ast, String tableName, - HashMap partSpec) throws SemanticException { + HashMap partSpec) throws HiveException { /** * Throw an error if the user tries to use the DDL with * hive.internal.ddl.list.bucketing.enable set to false. @@ -3151,7 +3146,7 @@ private boolean isConstant(ASTNode node) { return result; } - private void validateSkewedLocationString(String newLocation) throws SemanticException { + private void validateSkewedLocationString(String newLocation) throws HiveException { /* Validate location string. */ try { URI locUri = new URI(newLocation); @@ -3163,7 +3158,7 @@ private void validateSkewedLocationString(String newLocation) throws SemanticExc + "Please specify a complete absolute uri with scheme information."); } } catch (URISyntaxException e) { - throw new SemanticException(e); + throw HiveException.wrap(e); } } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java index 56999de..676d641 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.thrift.TDeserializer; import org.apache.thrift.TException; import org.apache.thrift.TSerializer; @@ -68,7 +69,7 @@ private EximUtil() { * Initialize the URI where the exported data collection is * to created for export, or is present for import */ - static URI getValidatedURI(HiveConf conf, String dcPath) throws SemanticException { + static URI getValidatedURI(HiveConf conf, String dcPath) throws HiveException { try { boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE); URI uri = new Path(dcPath).toUri(); @@ -122,7 +123,7 @@ static URI getValidatedURI(HiveConf conf, String dcPath) throws SemanticExceptio } } - static void validateTable(org.apache.hadoop.hive.ql.metadata.Table table) throws SemanticException { + static void validateTable(org.apache.hadoop.hive.ql.metadata.Table table) throws HiveException { if (table.isOffline()) { throw new SemanticException( ErrorMsg.OFFLINE_TABLE_OR_PARTITION.getMsg(":Table " @@ -136,7 +137,7 @@ static void validateTable(org.apache.hadoop.hive.ql.metadata.Table table) throws } } - public static String relativeToAbsolutePath(HiveConf conf, String location) throws SemanticException { + public static String relativeToAbsolutePath(HiveConf conf, String location) throws HiveException { boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE); if (testMode) { URI uri = new 
Path(location).toUri(); @@ -168,7 +169,7 @@ public static String relativeToAbsolutePath(HiveConf conf, String location) thro public static final String METADATA_FORMAT_FORWARD_COMPATIBLE_VERSION = null; public static void createExportDump(FileSystem fs, Path metadataPath, org.apache.hadoop.hive.ql.metadata.Table tableHandle, - List partitions) throws SemanticException, IOException { + List partitions) throws HiveException, IOException { try { JSONObject jsonContainer = new JSONObject(); jsonContainer.put("version", METADATA_FORMAT_VERSION); @@ -203,7 +204,7 @@ public static void createExportDump(FileSystem fs, Path metadataPath, org.apache public static Map.Entry> readMetaData(FileSystem fs, Path metadataPath) - throws IOException, SemanticException { + throws IOException, HiveException { FSDataInputStream mdstream = null; try { mdstream = fs.open(metadataPath); @@ -247,7 +248,7 @@ public static void createExportDump(FileSystem fs, Path metadataPath, org.apache } /* check the forward and backward compatibility */ - private static void checkCompatibility(String version, String fcVersion) throws SemanticException { + private static void checkCompatibility(String version, String fcVersion) throws HiveException { doCheckCompatibility( METADATA_FORMAT_VERSION, version, @@ -256,7 +257,7 @@ private static void checkCompatibility(String version, String fcVersion) throws /* check the forward and backward compatibility */ public static void doCheckCompatibility(String currVersion, - String version, String fcVersion) throws SemanticException { + String version, String fcVersion) throws HiveException { if (version == null) { throw new SemanticException(ErrorMsg.INVALID_METADATA.getMsg("Version number missing")); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java index eeee327..ccb7e90 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.ExplainWork; /** @@ -36,13 +37,13 @@ public class ExplainSemanticAnalyzer extends BaseSemanticAnalyzer { List fieldList; - public ExplainSemanticAnalyzer(HiveConf conf) throws SemanticException { + public ExplainSemanticAnalyzer(HiveConf conf) throws HiveException { super(conf); } @SuppressWarnings("unchecked") @Override - public void analyzeInternal(ASTNode ast) throws SemanticException { + public void analyzeInternal(ASTNode ast) throws HiveException { boolean extended = false; boolean formatted = false; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java index 7b2e2e6..20e716f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.plan.CopyWork; @@ -43,12 +44,12 @@ */ public class 
ExportSemanticAnalyzer extends BaseSemanticAnalyzer { - public ExportSemanticAnalyzer(HiveConf conf) throws SemanticException { + public ExportSemanticAnalyzer(HiveConf conf) throws HiveException { super(conf); } @Override - public void analyzeInternal(ASTNode ast) throws SemanticException { + public void analyzeInternal(ASTNode ast) throws HiveException { Tree tableTree = ast.getChild(0); Tree toTree = ast.getChild(1); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/FileSinkProcessor.java ql/src/java/org/apache/hadoop/hive/ql/parse/FileSinkProcessor.java index 9592992..df57afa 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/FileSinkProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/FileSinkProcessor.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils; /** @@ -47,7 +48,7 @@ */ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... nodeOutputs) - throws SemanticException { + throws HiveException { GenTezProcContext context = (GenTezProcContext) procCtx; FileSinkOperator fileSink = (FileSinkOperator) nd; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java index da917f7..43d19bd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.CreateFunctionDesc; import org.apache.hadoop.hive.ql.plan.DropFunctionDesc; import org.apache.hadoop.hive.ql.plan.FunctionWork; @@ -36,12 +37,12 @@ private static final Log LOG = LogFactory .getLog(FunctionSemanticAnalyzer.class); - public FunctionSemanticAnalyzer(HiveConf conf) throws SemanticException { + public FunctionSemanticAnalyzer(HiveConf conf) throws HiveException { super(conf); } @Override - public void analyzeInternal(ASTNode ast) throws SemanticException { + public void analyzeInternal(ASTNode ast) throws HiveException { if (ast.getToken().getType() == HiveParser.TOK_CREATEFUNCTION) { analyzeCreateFunction(ast); } @@ -52,14 +53,14 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { LOG.info("analyze done"); } - private void analyzeCreateFunction(ASTNode ast) throws SemanticException { + private void analyzeCreateFunction(ASTNode ast) throws HiveException { String functionName = ast.getChild(0).getText(); String className = unescapeSQLString(ast.getChild(1).getText()); CreateFunctionDesc desc = new CreateFunctionDesc(functionName, className); rootTasks.add(TaskFactory.get(new FunctionWork(desc), conf)); } - private void analyzeDropFunction(ASTNode ast) throws SemanticException { + private void analyzeDropFunction(ASTNode ast) throws HiveException { String functionName = ast.getChild(0).getText(); boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null); // we want to signal an error if the function doesn't exist and we're diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/GenMapRedWalker.java 
ql/src/java/org/apache/hadoop/hive/ql/parse/GenMapRedWalker.java index 9583a1b..d593c5c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/GenMapRedWalker.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/GenMapRedWalker.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker; import org.apache.hadoop.hive.ql.lib.Dispatcher; import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.metadata.HiveException; /** * Walks the operator tree in pre order fashion. @@ -46,7 +47,7 @@ public GenMapRedWalker(Dispatcher disp) { * operator being walked */ @Override - public void walk(Node nd) throws SemanticException { + public void walk(Node nd) throws HiveException { List children = nd.getChildren(); // maintain the stack of operators encountered diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java index 8363bbf..b3dcdc9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils; import org.apache.hadoop.hive.ql.plan.BaseWork; import org.apache.hadoop.hive.ql.plan.MapWork; @@ -55,7 +56,7 @@ @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procContext, Object... nodeOutputs) - throws SemanticException { + throws HiveException { GenTezProcContext context = (GenTezProcContext) procContext; @@ -232,7 +233,7 @@ protected void setupReduceSink(GenTezProcContext context, ReduceWork reduceWork, } protected MapWork createMapWork(GenTezProcContext context, Operator root, - TezWork tezWork) throws SemanticException { + TezWork tezWork) throws HiveException { assert root.getParentOperators().isEmpty(); MapWork mapWork = new MapWork("Map "+ (++sequenceNumber)); LOG.debug("Adding map work (" + mapWork.getName() + ") for " + root); @@ -251,7 +252,7 @@ protected MapWork createMapWork(GenTezProcContext context, Operator root, // this method's main use is to help unit testing this class protected void setupMapWork(MapWork mapWork, GenTezProcContext context, - Operator root, String alias) throws SemanticException { + Operator root, String alias) throws HiveException { // All the setup is done in GenMapRedUtils GenMapRedUtils.setMapWork(mapWork, context.parseContext, context.inputs, null, root, alias, context.conf, false); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWorkWalker.java ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWorkWalker.java index 08fd61e..c71c700 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWorkWalker.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWorkWalker.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker; import org.apache.hadoop.hive.ql.lib.Dispatcher; import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.BaseWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; @@ -57,11 +58,11 @@ private void setRoot(Node nd) { /** * starting point for walking. 
* - * @throws SemanticException + * @throws HiveException */ @Override public void startWalking(Collection startNodes, - HashMap nodeOutput) throws SemanticException { + HashMap nodeOutput) throws HiveException { toWalk.addAll(startNodes); while (toWalk.size() > 0) { Node nd = toWalk.remove(0); @@ -79,7 +80,7 @@ public void startWalking(Collection startNodes, * @param nd operator being walked */ @Override - public void walk(Node nd) throws SemanticException { + public void walk(Node nd) throws HiveException { List children = nd.getChildren(); // maintain the stack of operators encountered diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHook.java ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHook.java index d1e761b..79013c9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHook.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHook.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.hooks.Hook; +import org.apache.hadoop.hive.ql.metadata.HiveException; /** * HiveSemanticAnalyzerHook allows Hive to be extended with custom @@ -54,7 +55,7 @@ */ public ASTNode preAnalyze( HiveSemanticAnalyzerHookContext context, - ASTNode ast) throws SemanticException; + ASTNode ast) throws HiveException; /** * Invoked after Hive performs its own semantic analysis on a @@ -68,5 +69,5 @@ public ASTNode preAnalyze( */ public void postAnalyze( HiveSemanticAnalyzerHookContext context, - List> rootTasks) throws SemanticException; + List> rootTasks) throws HiveException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index ceb4c8a..b1309d0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -67,7 +67,7 @@ public static final String METADATA_NAME="_metadata"; - public ImportSemanticAnalyzer(HiveConf conf) throws SemanticException { + public ImportSemanticAnalyzer(HiveConf conf) throws HiveException { super(conf); } @@ -78,7 +78,7 @@ public boolean existsTable() { } @Override - public void analyzeInternal(ASTNode ast) throws SemanticException { + public void analyzeInternal(ASTNode ast) throws HiveException { try { Tree fromTree = ast.getChild(0); // initialize load path @@ -274,7 +274,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { //inputs.add(new ReadEntity(fromURI.toString(), // fromURI.getScheme().equals("hdfs") ? 
true : false)); } - } catch (SemanticException e) { + } catch (HiveException e) { throw e; } catch (Exception e) { throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg(), e); @@ -376,7 +376,7 @@ private static String partSpecToString(Map partSpec) { } private static void checkTable(Table table, CreateTableDesc tableDesc) - throws SemanticException, URISyntaxException { + throws HiveException, URISyntaxException { { EximUtil.validateTable(table); if (!table.isPartitioned()) { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java index a22a15f..16ce101 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java @@ -55,7 +55,7 @@ private boolean isLocal; private boolean isOverWrite; - public LoadSemanticAnalyzer(HiveConf conf) throws SemanticException { + public LoadSemanticAnalyzer(HiveConf conf) throws HiveException { super(conf); } @@ -113,7 +113,7 @@ private URI initializeFromURI(String fromPath) throws IOException, } private void applyConstraints(URI fromURI, URI toURI, Tree ast, - boolean isLocal) throws SemanticException { + boolean isLocal) throws HiveException { // local mode implies that scheme should be "file" // we can change this going forward @@ -155,7 +155,7 @@ private void applyConstraints(URI fromURI, URI toURI, Tree ast, } @Override - public void analyzeInternal(ASTNode ast) throws SemanticException { + public void analyzeInternal(ASTNode ast) throws HiveException { isLocal = false; isOverWrite = false; Tree fromTree = ast.getChild(0); @@ -239,19 +239,15 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { partSpec = new LinkedHashMap(); outputs.add(new WriteEntity(ts.tableHandle)); } else { - try{ - Partition part = Hive.get().getPartition(ts.tableHandle, partSpec, false); - if (part != null) { - if (part.isOffline()) { - throw new SemanticException(ErrorMsg.OFFLINE_TABLE_OR_PARTITION. - getMsg(ts.tableName + ":" + part.getName())); - } - outputs.add(new WriteEntity(part)); - } else { - outputs.add(new WriteEntity(ts.tableHandle)); + Partition part = Hive.get().getPartition(ts.tableHandle, partSpec, false); + if (part != null) { + if (part.isOffline()) { + throw new SemanticException(ErrorMsg.OFFLINE_TABLE_OR_PARTITION. 
+ getMsg(ts.tableName + ":" + part.getName())); } - } catch(HiveException e) { - throw new SemanticException(e); + outputs.add(new WriteEntity(part)); + } else { + outputs.add(new WriteEntity(ts.tableHandle)); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java index b42a425..51ad3ae 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hive.ql.lib.Dispatcher; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.PreOrderWalker; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.CreateMacroDesc; import org.apache.hadoop.hive.ql.plan.DropMacroDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; @@ -54,12 +55,12 @@ private static final Log LOG = LogFactory .getLog(MacroSemanticAnalyzer.class); - public MacroSemanticAnalyzer(HiveConf conf) throws SemanticException { + public MacroSemanticAnalyzer(HiveConf conf) throws HiveException { super(conf); } @Override - public void analyzeInternal(ASTNode ast) throws SemanticException { + public void analyzeInternal(ASTNode ast) throws HiveException { if (ast.getToken().getType() == HiveParser.TOK_CREATEMACRO) { LOG.debug("Analyzing create macro " + ast.dump()); analyzeCreateMacro(ast); @@ -71,7 +72,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { } @SuppressWarnings("unchecked") - private void analyzeCreateMacro(ASTNode ast) throws SemanticException { + private void analyzeCreateMacro(ASTNode ast) throws HiveException { String functionName = ast.getChild(0).getText(); List arguments = BaseSemanticAnalyzer.getColumns((ASTNode)ast.getChild(1), true); @@ -88,7 +89,7 @@ private void analyzeCreateMacro(ASTNode ast) throws SemanticException { PreOrderWalker walker = new PreOrderWalker(new Dispatcher() { @Override public Object dispatch(Node nd, Stack stack, Object... nodeOutputs) - throws SemanticException { + throws HiveException { if(nd instanceof ASTNode) { ASTNode node = (ASTNode)nd; if(node.getType() == HiveParser.TOK_TABLE_OR_COL) { @@ -129,7 +130,7 @@ public Object dispatch(Node nd, Stack stack, Object... 
nodeOutputs) } @SuppressWarnings("unchecked") - private void analyzeDropMacro(ASTNode ast) throws SemanticException { + private void analyzeDropMacro(ASTNode ast) throws HiveException { String functionName = ast.getChild(0).getText(); boolean ifExists = (ast.getFirstChildWithType(TOK_IFEXISTS) != null); // we want to signal an error if the function doesn't exist and we're diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java index 76f5a31..6bbbbf0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.GenMRFileSink1; import org.apache.hadoop.hive.ql.optimizer.GenMROperator; import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext; @@ -196,7 +197,7 @@ private static int getNumberOfReducers(MapredWork mrwork, HiveConf conf) { @Override protected void decideExecMode(List> rootTasks, Context ctx, GlobalLimitCtx globalLimitCtx) - throws SemanticException { + throws HiveException { // bypass for explain queries for now if (ctx.getExplain()) { @@ -258,7 +259,7 @@ public boolean accept(Path file) { mrtask.setLocalMode(true); } } catch (IOException e) { - throw new SemanticException(e); + throw HiveException.wrap(e); } } @@ -273,7 +274,7 @@ public boolean accept(Path file) { @Override protected void optimizeTaskPlan(List> rootTasks, - ParseContext pCtx, Context ctx) throws SemanticException { + ParseContext pCtx, Context ctx) throws HiveException { // reduce sink does not have any kids - since the plan by now has been // broken up into multiple // tasks, iterate over all tasks. 
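The anonymous Dispatcher in analyzeCreateMacro above, like the FileSinkProcessor and GenTezWork process() overrides earlier, now declares throws HiveException under @Override. That can only compile if the Dispatcher/NodeProcessor interfaces themselves are widened in a part of the patch not shown here, because Java lets an override narrow the checked exceptions of the method it implements but never broaden them. A stand-alone illustration with stand-in types (none of them Hive classes):

    // Stand-in exception and interface types, purely for illustration.
    class BroadException extends Exception {
      BroadException(String m) { super(m); }
    }

    class NarrowException extends BroadException {
      NarrowException(String m) { super(m); }
    }

    interface Hook {
      void run() throws BroadException;            // interface declares the broad type
    }

    public class OverrideWideningSketch implements Hook {
      @Override
      public void run() throws NarrowException {   // narrowing is allowed
        throw new NarrowException("fine");
      }

      // If Hook.run() declared only NarrowException, an implementation could not add
      // "throws BroadException"; the compiler would reject it. That is why the
      // interfaces have to move before (or together with) their implementations.
    }

The same constraint is what forces the walker entry points (GenMapRedWalker.walk, GenTezWorkWalker.startWalking) and the HiveSemanticAnalyzerHook interface above to be widened in lockstep with this change.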
@@ -293,7 +294,7 @@ protected void optimizeTaskPlan(List> rootTasks, @Override protected void generateTaskTree(List> rootTasks, ParseContext pCtx, - List> mvTask, Set inputs, Set outputs) throws SemanticException { + List> mvTask, Set inputs, Set outputs) throws HiveException { // generate map reduce plans ParseContext tempParseContext = getParseContext(pCtx, rootTasks); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/MetaDataExportListener.java ql/src/java/org/apache/hadoop/hive/ql/parse/MetaDataExportListener.java index a16c7dc..07bb596 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/MetaDataExportListener.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/MetaDataExportListener.java @@ -90,9 +90,7 @@ private void export_meta_data(PreDropTableEvent tableEvent) throws MetaException if (moveMetadataToTrash == true) { wh.deleteDir(metaPath, true); } - } catch (IOException e) { - throw new MetaException(e.getMessage()); - } catch (SemanticException e) { + } catch (Exception e) { throw new MetaException(e.getMessage()); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java index f011258..e3b1c71 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java @@ -129,7 +129,7 @@ public PTFDesc translate(PTFInvocationSpec qSpec, HiveConf hCfg, RowResolver inputRR, UnparseTranslator unparseT) - throws SemanticException { + throws HiveException { init(semAly, hCfg, inputRR, unparseT); ptfInvocation = qSpec; ptfDesc = new PTFDesc(); @@ -142,7 +142,7 @@ public PTFDesc translate(PTFInvocationSpec qSpec, public PTFDesc translate(WindowingSpec wdwSpec, SemanticAnalyzer semAly, HiveConf hCfg, RowResolver inputRR, UnparseTranslator unparseT) - throws SemanticException { + throws HiveException { init(semAly, hCfg, inputRR, unparseT); windowingSpec = wdwSpec; ptfDesc = new PTFDesc(); @@ -224,7 +224,7 @@ public PTFDesc translate(WindowingSpec wdwSpec, SemanticAnalyzer semAly, HiveCon return ptfDesc; } - private void translatePTFChain() throws SemanticException { + private void translatePTFChain() throws HiveException { Deque ptfChain = new ArrayDeque(); PTFInputSpec currentSpec = ptfInvocation.getFunction(); @@ -252,7 +252,7 @@ private void translatePTFChain() throws SemanticException { } private PTFQueryInputDef translate(PTFQueryInputSpec spec, - int inpNum) throws SemanticException + int inpNum) throws HiveException { PTFQueryInputDef def = new PTFQueryInputDef(); StructObjectInspector oi = PTFTranslator.getStandardStructOI(inputRR); @@ -266,7 +266,7 @@ private PTFQueryInputDef translate(PTFQueryInputSpec spec, private PartitionedTableFunctionDef translate(PartitionedTableFunctionSpec spec, PTFInputDef inpDef, int inpNum) - throws SemanticException { + throws HiveException { TableFunctionResolver tFn = FunctionRegistry.getTableFunctionResolver(spec.getName()); if (tFn == null) { throw new SemanticException(String.format("Unknown Table Function %s", @@ -287,12 +287,7 @@ private PartitionedTableFunctionDef translate(PartitionedTableFunctionSpec spec, { for (ASTNode expr : args) { - PTFExpressionDef argDef = null; - try { - argDef = buildExpressionDef(inpDef.getOutputShape(), expr); - } catch (HiveException he) { - throw new SemanticException(he); - } + PTFExpressionDef argDef = buildExpressionDef(inpDef.getOutputShape(), expr); def.addArg(argDef); } } @@ -343,7 +338,7 @@ private PartitionedTableFunctionDef 
translate(PartitionedTableFunctionSpec spec, } private WindowFunctionDef translate(WindowTableFunctionDef wdwTFnDef, - WindowFunctionSpec spec) throws SemanticException { + WindowFunctionSpec spec) throws HiveException { WindowFunctionInfo wFnInfo = FunctionRegistry.getWindowFunctionInfo(spec.getName()); WindowFunctionDef def = new WindowFunctionDef(); def.setName(spec.getName()); @@ -362,12 +357,7 @@ private WindowFunctionDef translate(WindowTableFunctionDef wdwTFnDef, { for (ASTNode expr : args) { - PTFExpressionDef argDef = null; - try { - argDef = buildExpressionDef(inpShape, expr); - } catch (HiveException he) { - throw new SemanticException(he); - } + PTFExpressionDef argDef = buildExpressionDef(inpShape, expr); def.addArg(argDef); } } @@ -399,18 +389,14 @@ private WindowFunctionDef translate(WindowTableFunctionDef wdwTFnDef, def.setWindowFrame(wdwFrame); } - try { - setupWdwFnEvaluator(def); - } catch (HiveException he) { - throw new SemanticException(he); - } + setupWdwFnEvaluator(def); return def; } private void translatePartitioning(PartitionedTableFunctionDef def, PartitionedTableFunctionSpec spec) - throws SemanticException { + throws HiveException { applyConstantPartition(spec); if (spec.getPartition() == null) { @@ -441,7 +427,7 @@ private static void applyConstantPartition(PartitionedTableFunctionSpec spec) { } private PartitionDef translate(ShapeDetails inpShape, PartitionSpec spec) - throws SemanticException { + throws HiveException { if (spec == null || spec.getExpressions() == null || spec.getExpressions().size() == 0) { return null; } @@ -455,13 +441,8 @@ private PartitionDef translate(ShapeDetails inpShape, PartitionSpec spec) } private PTFExpressionDef translate(ShapeDetails inpShape, - PartitionExpression pExpr) throws SemanticException { - PTFExpressionDef expDef = null; - try { - expDef = buildExpressionDef(inpShape, pExpr.getExpression()); - } catch (HiveException he) { - throw new SemanticException(he); - } + PartitionExpression pExpr) throws HiveException { + PTFExpressionDef expDef = buildExpressionDef(inpShape, pExpr.getExpression()); PTFTranslator.validateComparable(expDef.getOI(), String.format("Partition Expression %s is not a comparable expression", pExpr .getExpression().toStringTree())); @@ -470,7 +451,7 @@ private PTFExpressionDef translate(ShapeDetails inpShape, private OrderDef translate(ShapeDetails inpShape, OrderSpec spec, - PartitionDef partitionDef) throws SemanticException { + PartitionDef partitionDef) throws HiveException { OrderDef def = new OrderDef(); if (null == spec) { @@ -488,18 +469,16 @@ private OrderDef translate(ShapeDetails inpShape, private OrderExpressionDef translate(ShapeDetails inpShape, OrderExpression oExpr) - throws SemanticException { + throws HiveException { OrderExpressionDef oexpDef = new OrderExpressionDef(); oexpDef.setOrder(oExpr.getOrder()); - try { - PTFExpressionDef expDef = buildExpressionDef(inpShape, oExpr.getExpression()); - oexpDef.setExpressionTreeString(expDef.getExpressionTreeString()); - oexpDef.setExprEvaluator(expDef.getExprEvaluator()); - oexpDef.setExprNode(expDef.getExprNode()); - oexpDef.setOI(expDef.getOI()); - } catch (HiveException he) { - throw new SemanticException(he); - } + + PTFExpressionDef expDef = buildExpressionDef(inpShape, oExpr.getExpression()); + oexpDef.setExpressionTreeString(expDef.getExpressionTreeString()); + oexpDef.setExprEvaluator(expDef.getExprEvaluator()); + oexpDef.setExprNode(expDef.getExprNode()); + oexpDef.setOI(expDef.getOI()); + 
PTFTranslator.validateComparable(oexpDef.getOI(), String.format("Partition Expression %s is not a comparable expression", oExpr.getExpression().toStringTree())); @@ -507,7 +486,7 @@ private OrderExpressionDef translate(ShapeDetails inpShape, } private WindowFrameDef translate(String wFnName, ShapeDetails inpShape, WindowSpec spec) - throws SemanticException { + throws HiveException { /* * Since we componentize Windowing, no need to translate * the Partition & Order specs of individual WFns. @@ -517,7 +496,7 @@ private WindowFrameDef translate(String wFnName, ShapeDetails inpShape, WindowSp private WindowFrameDef translate(ShapeDetails inpShape, WindowFrameSpec spec) - throws SemanticException { + throws HiveException { if (spec == null) { return null; } @@ -537,19 +516,14 @@ private WindowFrameDef translate(ShapeDetails inpShape, } private BoundaryDef translate(ShapeDetails inpShape, BoundarySpec bndSpec) - throws SemanticException { + throws HiveException { if (bndSpec instanceof ValueBoundarySpec) { ValueBoundarySpec vBndSpec = (ValueBoundarySpec) bndSpec; ValueBoundaryDef vbDef = new ValueBoundaryDef(); vbDef.setAmt(vBndSpec.getAmt()); vbDef.setDirection(vBndSpec.getDirection()); PTFTranslator.validateNoLeadLagInValueBoundarySpec(vBndSpec.getExpression()); - PTFExpressionDef exprDef = null; - try { - exprDef = buildExpressionDef(inpShape, vBndSpec.getExpression()); - } catch (HiveException he) { - throw new SemanticException(he); - } + PTFExpressionDef exprDef = buildExpressionDef(inpShape, vBndSpec.getExpression()); PTFTranslator.validateValueBoundaryExprType(exprDef.getOI()); vbDef.setExpressionDef(exprDef); return vbDef; @@ -589,7 +563,7 @@ static void setupWdwFnEvaluator(WindowFunctionDef def) throws HiveException { } private static void validateValueBoundaryExprType(ObjectInspector OI) - throws SemanticException { + throws HiveException { if (!OI.getCategory().equals(Category.PRIMITIVE)) { throw new SemanticException( String.format( @@ -622,7 +596,7 @@ private static void validateValueBoundaryExprType(ObjectInspector OI) private ShapeDetails setupTableFnShape(String fnName, ShapeDetails inpShape, StructObjectInspector OI, List columnNames, RowResolver rr) - throws SemanticException { + throws HiveException { if (fnName.equals(FunctionRegistry.NOOP_TABLE_FUNCTION) || fnName.equals( FunctionRegistry.NOOP_MAP_TABLE_FUNCTION)) { @@ -633,7 +607,7 @@ private ShapeDetails setupTableFnShape(String fnName, ShapeDetails inpShape, private ShapeDetails setupShape(StructObjectInspector OI, List columnNames, - RowResolver rr) throws SemanticException { + RowResolver rr) throws HiveException { Map serdePropsMap = new LinkedHashMap(); SerDe serde = null; ShapeDetails shp = new ShapeDetails(); @@ -643,7 +617,7 @@ private ShapeDetails setupShape(StructObjectInspector OI, StructObjectInspector outOI = PTFPartition.setupPartitionOutputOI(serde, OI); shp.setOI(outOI); } catch (SerDeException se) { - throw new SemanticException(se); + throw HiveException.wrap(se); } shp.setRr(rr); @@ -675,7 +649,7 @@ private ShapeDetails copyShape(ShapeDetails src) { private ShapeDetails setupShapeForNoop(ShapeDetails inpShape, StructObjectInspector OI, List columnNames, - RowResolver rr) throws SemanticException { + RowResolver rr) throws HiveException { ShapeDetails shp = new ShapeDetails(); shp.setRr(rr); @@ -694,7 +668,7 @@ private ShapeDetails setupShapeForNoop(ShapeDetails inpShape, protected static ArrayList addPartitionExpressionsToOrderList( ArrayList partCols, - ArrayList orderCols) throws SemanticException { 
+ ArrayList orderCols) throws HiveException { int numOfPartColumns = 0; int chkSize = partCols.size(); @@ -735,7 +709,7 @@ private ShapeDetails setupShapeForNoop(ShapeDetails inpShape, private void setupRankingArgs(WindowTableFunctionDef wdwTFnDef, WindowFunctionDef wFnDef, WindowFunctionSpec wSpec) - throws SemanticException { + throws HiveException { if (wSpec.getArgs().size() > 0) { throw new SemanticException("Ranking Functions can take no arguments"); } @@ -852,7 +826,7 @@ public static StructObjectInspector getStandardStructOI(RowResolver rr) { } protected static void validateComparable(ObjectInspector OI, String errMsg) - throws SemanticException { + throws HiveException { if (!ObjectInspectorUtils.compareSupported(OI)) { throw new SemanticException(errMsg); } @@ -873,7 +847,7 @@ private static void addInputColumnsToList(ShapeDetails shape, protected static RowResolver buildRowResolverForPTF(String tbFnName, String tabAlias, StructObjectInspector rowObjectInspector, - List outputColNames, RowResolver inputRR) throws SemanticException { + List outputColNames, RowResolver inputRR) throws HiveException { if (tbFnName.equals(FunctionRegistry.NOOP_TABLE_FUNCTION) || tbFnName.equals(FunctionRegistry.NOOP_MAP_TABLE_FUNCTION)) { @@ -894,7 +868,7 @@ protected static RowResolver buildRowResolverForPTF(String tbFnName, String tabA } protected RowResolver buildRowResolverForWindowing(WindowTableFunctionDef def) - throws SemanticException { + throws HiveException { RowResolver rr = new RowResolver(); HashMap aliasToExprMap = windowingSpec.getAliasToWdwExpr(); @@ -947,7 +921,7 @@ protected RowResolver buildRowResolverForWindowing(WindowTableFunctionDef def) protected static RowResolver buildRowResolverForNoop(String tabAlias, StructObjectInspector rowObjectInspector, - RowResolver inputRowResolver) throws SemanticException { + RowResolver inputRowResolver) throws HiveException { LOG.info("QueryTranslationInfo::getRowResolver invoked on ObjectInspector"); RowResolver rwsch = new RowResolver(); List fields = rowObjectInspector.getAllStructFieldRefs(); @@ -1002,7 +976,7 @@ protected static RowResolver buildRowResolverForNoop(String tabAlias, /* * If the cInfo is for an ASTNode, this function returns the ASTNode that it is for. 
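
The PTFTranslator hunks above delete the same try/catch shape repeatedly: while a method was declared throws SemanticException, every call into HiveException-throwing code had to wrap and rethrow. Once the signature is widened, the exception propagates on its own. A minimal before/after sketch with stand-in types, not the real translator classes:

class HiveException extends Exception {
  HiveException() {}
  HiveException(Throwable cause) { super(cause); }
}
class SemanticException extends HiveException {
  SemanticException(Throwable cause) { super(cause); }
}

class TranslatorSketch {
  String buildExpressionDef(String expr) throws HiveException { return expr; }

  // Old shape: the method may only throw SemanticException, so it wraps and rethrows.
  String translateOld(String expr) throws SemanticException {
    try {
      return buildExpressionDef(expr);
    } catch (HiveException he) {
      throw new SemanticException(he);
    }
  }

  // New shape: the HiveException simply propagates and the boilerplate disappears.
  String translateNew(String expr) throws HiveException {
    return buildExpressionDef(expr);
  }
}
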
*/ - public static ASTNode getASTNode(ColumnInfo cInfo, RowResolver rr) throws SemanticException { + public static ASTNode getASTNode(ColumnInfo cInfo, RowResolver rr) throws HiveException { for (Map.Entry entry : rr.getExpressionMap().entrySet()) { ASTNode expr = entry.getValue(); if (rr.getExpression(expr).equals(cInfo)) { @@ -1033,7 +1007,7 @@ private static void _visit(Object t, Object parent, int childIndex, ContextVisit } public static ArrayList componentize(PTFInvocationSpec ptfInvocation) - throws SemanticException { + throws HiveException { ArrayList componentInvocations = new ArrayList(); @@ -1105,7 +1079,7 @@ private static void _visit(Object t, Object parent, int childIndex, ContextVisit } public static void validateNoLeadLagInValueBoundarySpec(ASTNode node) - throws SemanticException { + throws HiveException { String errMsg = "Lead/Lag not allowed in ValueBoundary Spec"; TreeWizard tw = new TreeWizard(ParseDriver.adaptor, HiveParser.tokenNames); ValidateNoLeadLag visitor = new ValidateNoLeadLag(errMsg); @@ -1135,7 +1109,7 @@ public void visit(Object t, Object parent, int childIndex, Map labels) { } } - void checkValid() throws SemanticException { + void checkValid() throws HiveException { if (throwError) { throw new SemanticException(errMsg + errorNode.toStringTree()); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java index 0af2536..2a19e9f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java @@ -25,9 +25,8 @@ import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; -import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory; -import org.apache.hadoop.hive.serde2.typeinfo.BaseCharTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; @@ -82,7 +81,7 @@ private ParseUtils() { } public static List validateColumnNameUniqueness( - List fieldSchemas) throws SemanticException { + List fieldSchemas) throws HiveException { // no duplicate column names // currently, it is a simple n*n algorithm - this can be optimized later if @@ -112,7 +111,7 @@ private ParseUtils() { * @return Expression converting column to the type specified by tableFieldTypeInfo */ static ExprNodeDesc createConversionCast(ExprNodeDesc column, PrimitiveTypeInfo tableFieldTypeInfo) - throws SemanticException { + throws HiveException { // Get base type, since type string may be parameterized String baseType = TypeInfoUtils.getBaseName(tableFieldTypeInfo.getTypeName()); @@ -125,7 +124,7 @@ static ExprNodeDesc createConversionCast(ExprNodeDesc column, PrimitiveTypeInfo } public static VarcharTypeInfo getVarcharTypeInfo(ASTNode node) - throws SemanticException { + throws HiveException { if (node.getChildCount() != 1) { throw new SemanticException("Bad params for type varchar"); } @@ -135,7 +134,7 @@ public static VarcharTypeInfo getVarcharTypeInfo(ASTNode node) } public static CharTypeInfo getCharTypeInfo(ASTNode node) - throws SemanticException { + throws HiveException { if (node.getChildCount() != 1) { throw new SemanticException("Bad params for type char"); } @@ -195,7 +194,7 
@@ static int checkJoinFilterRefersOneAlias(String[] tabAliases, ASTNode filterCond } public static DecimalTypeInfo getDecimalTypeTypeInfo(ASTNode node) - throws SemanticException { + throws HiveException { if (node.getChildCount() > 2) { throw new SemanticException("Bad params for type decimal"); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/PrintOpTreeProcessor.java ql/src/java/org/apache/hadoop/hive/ql/parse/PrintOpTreeProcessor.java index eb6b83a..b954f59 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/PrintOpTreeProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/PrintOpTreeProcessor.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.OperatorDesc; /** @@ -80,7 +81,7 @@ private String getChildren(Operator op) { } public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { Operator op = (Operator) nd; if (opMap.get(op) == null) { opMap.put(op, curNum++); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java index 92ccbea..32bb620 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java @@ -11,6 +11,7 @@ import org.apache.hadoop.hive.ql.exec.ColumnInfo; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.SubQueryUtils.ISubQueryJoinInfo; import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory.DefaultExprProcessor; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; @@ -26,7 +27,7 @@ IN, NOT_IN; - public static SubQueryType get(ASTNode opNode) throws SemanticException { + public static SubQueryType get(ASTNode opNode) throws HiveException { switch(opNode.getType()) { case HiveParser.KW_EXISTS: return EXISTS; @@ -246,7 +247,7 @@ ColumnInfo getRightOuterColInfo() { if ( cInfo != null) { return ObjectPair.create(ExprType.REFERS_PARENT, cInfo); } - } catch(SemanticException se) { + } catch(HiveException se) { } } if ( expr.getType() == HiveParser.DOT) { @@ -280,7 +281,7 @@ ColumnInfo getRightOuterColInfo() { * * @return Conjunct contains details on the left and right side of the conjunct expression. */ - Conjunct analyzeConjunct(ASTNode conjunct) throws SemanticException { + Conjunct analyzeConjunct(ASTNode conjunct) throws HiveException { int type = conjunct.getType(); if ( type == HiveParser.EQUAL ) { @@ -305,17 +306,17 @@ Conjunct analyzeConjunct(ASTNode conjunct) throws SemanticException { * Apply this logic on the leftmost(first) dot in an AST tree. 
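
HiveException.wrap(...) appears at several call sites in this patch (IOException in MapReduceCompiler, SerDeException in PTFTranslator, and MetaException and ClassNotFoundException further down in SemanticAnalyzer), but the helper itself is not in these hunks; presumably it is added to org.apache.hadoop.hive.ql.metadata.HiveException elsewhere in the patch. A stand-in sketch of the shape such a helper would need:

class HiveException extends Exception {
  public HiveException(Throwable cause) { super(cause); }

  // Return the argument unchanged if it is already a HiveException, otherwise wrap it.
  public static HiveException wrap(Throwable t) {
    return (t instanceof HiveException) ? (HiveException) t : new HiveException(t);
  }
}

With a check like this, call sites stay one line and a cause that is already a HiveException is rethrown as-is instead of being wrapped twice.
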
*/ protected ColumnInfo resolveDot(ASTNode node) { + TypeCheckCtx tcCtx = new TypeCheckCtx(parentQueryRR); + String str = BaseSemanticAnalyzer.unescapeIdentifier(node.getChild(1).getText()); + ExprNodeDesc idDesc = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, str); try { - TypeCheckCtx tcCtx = new TypeCheckCtx(parentQueryRR); - String str = BaseSemanticAnalyzer.unescapeIdentifier(node.getChild(1).getText()); - ExprNodeDesc idDesc = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, str); - ExprNodeColumnDesc colDesc = (ExprNodeColumnDesc) - defaultExprProcessor.process(node, stack, tcCtx, (Object) null, idDesc); - if ( colDesc != null ) { - String[] qualName = parentQueryRR.reverseLookup(colDesc.getColumn()); - return parentQueryRR.get(qualName[0], qualName[1]); - } - } catch(SemanticException se) { + ExprNodeColumnDesc colDesc = (ExprNodeColumnDesc) + defaultExprProcessor.process(node, stack, tcCtx, null, idDesc); + if ( colDesc != null ) { + String[] qualName = parentQueryRR.reverseLookup(colDesc.getColumn()); + return parentQueryRR.get(qualName[0], qualName[1]); + } + } catch(HiveException se) { } return null; } @@ -481,7 +482,7 @@ public SubQueryTypeDef getOperator() { void validateAndRewriteAST(RowResolver outerQueryRR, boolean forHavingClause, String outerQueryAlias, - Set outerQryAliases) throws SemanticException { + Set outerQryAliases) throws HiveException { ASTNode selectClause = (ASTNode) subQueryAST.getChild(1).getChild(1); @@ -612,7 +613,7 @@ private void setJoinType() { void buildJoinCondition(RowResolver outerQueryRR, RowResolver sqRR, boolean forHavingClause, - String outerQueryAlias) throws SemanticException { + String outerQueryAlias) throws HiveException { ASTNode parentQueryJoinCond = null; if ( parentQueryExpression != null ) { @@ -718,7 +719,7 @@ String getNextCorrExprAlias() { */ private void rewrite(RowResolver parentQueryRR, boolean forHavingClause, - String outerQueryAlias) throws SemanticException { + String outerQueryAlias) throws HiveException { ASTNode selectClause = (ASTNode) subQueryAST.getChild(1).getChild(1); ASTNode whereClause = SubQueryUtils.subQueryWhere(subQueryAST); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java index f142f3e..13db140 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.ColumnInfo; import org.apache.hadoop.hive.ql.exec.RowSchema; +import org.apache.hadoop.hive.ql.metadata.HiveException; /** * Implementation of the Row Resolver. @@ -84,7 +85,7 @@ public void putExpression(ASTNode node, ColumnInfo colInfo) { * Retrieves the ColumnInfo corresponding to a source expression which * exactly matches the string rendering of the given ASTNode. 
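
In QBSubQuery.resolveDot above, the catch is widened from SemanticException to HiveException while the body still swallows the exception and falls through to return null. Any failure of the expression processor, not just a semantic-analysis failure, is therefore reported to the caller as "column not resolvable". A stand-in sketch of that contract (simplified types):

class HiveException extends Exception {}
class ColumnInfo {}

interface ExprProcessor {
  ColumnInfo process(String name) throws HiveException;
}

class SubQuerySketch {
  // Any HiveException from the processor is treated as "not resolvable": the caller gets null.
  ColumnInfo resolveOrNull(ExprProcessor processor, String name) {
    try {
      return processor.process(name);
    } catch (HiveException ignored) {
      return null;
    }
  }
}
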
*/ - public ColumnInfo getExpression(ASTNode node) throws SemanticException { + public ColumnInfo getExpression(ASTNode node) throws HiveException { return get("", node.toStringTree()); } @@ -155,9 +156,9 @@ public boolean hasTableAlias(String tab_alias) { * @param col_alias * The column name that is being searched for * @return ColumnInfo - * @throws SemanticException + * @throws HiveException */ - public ColumnInfo get(String tab_alias, String col_alias) throws SemanticException { + public ColumnInfo get(String tab_alias, String col_alias) throws HiveException { col_alias = col_alias.toLowerCase(); ColumnInfo ret = null; @@ -197,7 +198,7 @@ public ColumnInfo get(String tab_alias, String col_alias) throws SemanticExcepti /** * check if column name is already exist in RR */ - public void checkColumn(String tableAlias, String columnAlias) throws SemanticException { + public void checkColumn(String tableAlias, String columnAlias) throws HiveException { ColumnInfo prev = get(null, columnAlias); if (prev != null && (tableAlias == null || !tableAlias.equalsIgnoreCase(prev.getTabAlias()))) { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 588339e..15d7bb3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hive.ql.parse; import java.io.Serializable; -import java.net.URI; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -244,7 +243,7 @@ int nextNum; } - public SemanticAnalyzer(HiveConf conf) throws SemanticException { + public SemanticAnalyzer(HiveConf conf) throws HiveException { super(conf); opToPartPruner = new HashMap(); @@ -332,7 +331,7 @@ public ParseContext getParseContext() { @SuppressWarnings("nls") public void doPhase1QBExpr(ASTNode ast, QBExpr qbexpr, String id, String alias) - throws SemanticException { + throws HiveException { assert (ast.getToken() != null); switch (ast.getToken().getType()) { @@ -366,7 +365,7 @@ public void doPhase1QBExpr(ASTNode ast, QBExpr qbexpr, String id, String alias) } private LinkedHashMap doPhase1GetAggregationsFromSelect( - ASTNode selExpr, QB qb, String dest) throws SemanticException { + ASTNode selExpr, QB qb, String dest) throws HiveException { // Iterate over the selects search for aggregation Trees. // Use String as keys to eliminate duplicate trees. @@ -422,10 +421,10 @@ private void doPhase1GetColumnAliasesFromSelect( * @param aggregations * the key to the HashTable is the toStringTree() representation of * the aggregation subtree. 
- * @throws SemanticException + * @throws HiveException */ private void doPhase1GetAllAggregations(ASTNode expressionTree, - HashMap aggregations, List wdwFns) throws SemanticException { + HashMap aggregations, List wdwFns) throws HiveException { int exprTokenType = expressionTree.getToken().getType(); if (exprTokenType == HiveParser.TOK_FUNCTION || exprTokenType == HiveParser.TOK_FUNCTIONDI @@ -463,7 +462,7 @@ private void doPhase1GetAllAggregations(ASTNode expressionTree, } private List doPhase1GetDistinctFuncExprs( - HashMap aggregationTrees) throws SemanticException { + HashMap aggregationTrees) throws HiveException { List exprs = new ArrayList(); for (Map.Entry entry : aggregationTrees.entrySet()) { ASTNode value = entry.getValue(); @@ -495,7 +494,7 @@ public static String generateErrorMessage(ASTNode ast, String message) { * * @return the alias of the table */ - private String processTable(QB qb, ASTNode tabref) throws SemanticException { + private String processTable(QB qb, ASTNode tabref) throws HiveException { // For each table reference get the table name // and the alias (if alias is not present, the table name // is used as an alias) @@ -619,7 +618,7 @@ private String processTable(QB qb, ASTNode tabref) throws SemanticException { return alias; } - private void assertCombineInputFormat(Tree numerator, String message) throws SemanticException { + private void assertCombineInputFormat(Tree numerator, String message) throws HiveException { String inputFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEINPUTFORMAT); if (!inputFormat.equals(CombineHiveInputFormat.class.getName())) { throw new SemanticException(generateErrorMessage((ASTNode) numerator, @@ -627,7 +626,7 @@ private void assertCombineInputFormat(Tree numerator, String message) throws Sem } } - private String processSubQuery(QB qb, ASTNode subq) throws SemanticException { + private String processSubQuery(QB qb, ASTNode subq) throws HiveException { // This is a subquery and must have an alias if (subq.getChildCount() != 2) { @@ -675,10 +674,10 @@ private boolean isJoinToken(ASTNode node) { * * @param qb * @param join - * @throws SemanticException + * @throws HiveException */ @SuppressWarnings("nls") - private void processJoin(QB qb, ASTNode join) throws SemanticException { + private void processJoin(QB qb, ASTNode join) throws HiveException { int numChildren = join.getChildCount(); if ((numChildren != 2) && (numChildren != 3) && join.getToken().getType() != HiveParser.TOK_UNIQUEJOIN) { @@ -725,11 +724,11 @@ private void processJoin(QB qb, ASTNode join) throws SemanticException { * @param qb * @param lateralView * @return the alias for the table/subquery - * @throws SemanticException + * @throws HiveException */ private String processLateralView(QB qb, ASTNode lateralView) - throws SemanticException { + throws HiveException { int numChildren = lateralView.getChildCount(); assert (numChildren == 2); @@ -772,11 +771,11 @@ private String processLateralView(QB qb, ASTNode lateralView) * @param ast * @param qb * @param ctx_1 - * @throws SemanticException + * @throws HiveException */ @SuppressWarnings({"fallthrough", "nls"}) public boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1) - throws SemanticException { + throws HiveException { boolean phase1Result = true; QBParseInfo qbp = qb.getParseInfo(); @@ -1001,12 +1000,7 @@ public boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1) throw new SemanticException(ErrorMsg.INSERT_INTO_DYNAMICPARTITION_IFNOTEXISTS .getMsg(partition.toString())); } - Table table = null; - try { - 
table = db.getTable(tableName); - } catch (HiveException ex) { - throw new SemanticException(ex); - } + Table table = db.getTable(tableName); try { Partition parMetaData = db.getPartition(table, partition, false); // Check partition exists if it exists skip the overwrite @@ -1047,12 +1041,12 @@ public boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1) return phase1Result; } - private void getMetaData(QBExpr qbexpr) throws SemanticException { + private void getMetaData(QBExpr qbexpr) throws HiveException { getMetaData(qbexpr, null); } private void getMetaData(QBExpr qbexpr, ReadEntity parentInput) - throws SemanticException { + throws HiveException { if (qbexpr.getOpcode() == QBExpr.Opcode.NULLOP) { getMetaData(qbexpr.getQB(), parentInput); } else { @@ -1061,12 +1055,12 @@ private void getMetaData(QBExpr qbexpr, ReadEntity parentInput) } } - public void getMetaData(QB qb) throws SemanticException { + public void getMetaData(QB qb) throws HiveException { getMetaData(qb, null); } @SuppressWarnings("nls") - public void getMetaData(QB qb, ReadEntity parentInput) throws SemanticException { + public void getMetaData(QB qb, ReadEntity parentInput) throws HiveException { try { LOG.info("Get metadata for source tables"); @@ -1259,7 +1253,7 @@ public void getMetaData(QB qb, ReadEntity parentInput) throws SemanticException Warehouse wh = new Warehouse(conf); location = wh.getDatabasePath(db.getDatabase(newTable.getDbName())); } catch (MetaException e) { - throw new SemanticException(e); + throw HiveException.wrap(e); } try { fname = ctx.getExternalTmpPath( @@ -1339,7 +1333,7 @@ public void getMetaData(QB qb, ReadEntity parentInput) throws SemanticException } private void replaceViewReferenceWithDefinition(QB qb, Table tab, - String tab_name, String alias) throws SemanticException { + String tab_name, String alias) throws HiveException { ParseDriver pd = new ParseDriver(); ASTNode viewTree; @@ -1393,7 +1387,7 @@ private boolean isPresent(String[] list, String elem) { @SuppressWarnings("nls") void parseJoinCondPopulateAlias(QBJoinTree joinTree, ASTNode condn, ArrayList leftAliases, ArrayList rightAliases, - ArrayList fields) throws SemanticException { + ArrayList fields) throws HiveException { // String[] allAliases = joinTree.getAllAliases(); switch (condn.getToken().getType()) { case HiveParser.TOK_TABLE_OR_COL: @@ -1491,7 +1485,7 @@ void parseJoinCondPopulateAlias(QBJoinTree joinTree, ASTNode condn, private void populateAliases(List leftAliases, List rightAliases, ASTNode condn, QBJoinTree joinTree, - List leftSrc) throws SemanticException { + List leftSrc) throws HiveException { if ((leftAliases.size() != 0) && (rightAliases.size() != 0)) { throw new SemanticException(ErrorMsg.INVALID_JOIN_CONDITION_1 .getMsg(condn)); @@ -1527,7 +1521,7 @@ void applyEqualityPredicateToQBJoinTree(QBJoinTree joinTree, List leftCondAl1, List leftCondAl2, List rightCondAl1, - List rightCondAl2) throws SemanticException { + List rightCondAl2) throws HiveException { if (leftCondAl1.size() != 0) { if ((rightCondAl1.size() != 0) || ((rightCondAl1.size() == 0) && (rightCondAl2.size() == 0))) { @@ -1668,7 +1662,7 @@ void applyEqualityPredicateToQBJoinTree(QBJoinTree joinTree, } private void parseJoinCondition(QBJoinTree joinTree, ASTNode joinCond, List leftSrc) - throws SemanticException { + throws HiveException { if (joinCond == null) { return; } @@ -1703,10 +1697,10 @@ private void parseJoinCondition(QBJoinTree joinTree, ASTNode joinCond, List leftSrc, JoinType type) throws SemanticException { + List leftSrc, JoinType 
type) throws HiveException { if (joinCond == null) { return; } @@ -1827,7 +1821,7 @@ private void parseJoinCondition(QBJoinTree joinTree, ASTNode joinCond, } } - private void extractJoinCondsFromWhereClause(QBJoinTree joinTree, QB qb, String dest, ASTNode predicate) throws SemanticException { + private void extractJoinCondsFromWhereClause(QBJoinTree joinTree, QB qb, String dest, ASTNode predicate) throws HiveException { switch (predicate.getType()) { case HiveParser.KW_AND: @@ -1902,7 +1896,7 @@ private void extractJoinCondsFromWhereClause(QBJoinTree joinTree, QB qb, String @SuppressWarnings("nls") private Operator genHavingPlan(String dest, QB qb, Operator input, Map aliasToOpInfo) - throws SemanticException { + throws HiveException { ASTNode havingExpr = qb.getParseInfo().getHavingForClause(dest); @@ -1928,7 +1922,7 @@ private Operator genHavingPlan(String dest, QB qb, Operator input, private Operator genPlanForSubQueryPredicate( QB qbSQ, - ISubQueryJoinInfo subQueryPredicate) throws SemanticException { + ISubQueryJoinInfo subQueryPredicate) throws HiveException { qbSQ.setSubQueryDef(subQueryPredicate.getSubQuery()); Phase1Ctx ctx_1 = initPhase1Ctx(); doPhase1(subQueryPredicate.getSubQueryAST(), qbSQ, ctx_1); @@ -1941,7 +1935,7 @@ private Operator genPlanForSubQueryPredicate( private Operator genFilterPlan(ASTNode searchCond, QB qb, Operator input, Map aliasToOpInfo, boolean forHavingClause) - throws SemanticException { + throws HiveException { OpParseContext inputCtx = opParseCtx.get(input); RowResolver inputRR = inputCtx.getRowResolver(); @@ -2092,7 +2086,7 @@ private Operator genFilterPlan(ASTNode searchCond, QB qb, Operator input, */ @SuppressWarnings("nls") private Operator genFilterPlan(QB qb, ASTNode condn, Operator input) - throws SemanticException { + throws HiveException { OpParseContext inputCtx = opParseCtx.get(input); RowResolver inputRR = inputCtx.getRowResolver(); @@ -2111,7 +2105,7 @@ private Operator genFilterPlan(QB qb, ASTNode condn, Operator input) private Integer genColListRegex(String colRegex, String tabAlias, ASTNode sel, ArrayList col_list, RowResolver input, Integer pos, RowResolver output, List aliases, boolean subQuery) - throws SemanticException { + throws HiveException { // The table alias should exist if (tabAlias != null && !input.hasTableAlias(tabAlias)) { @@ -2244,7 +2238,7 @@ private String fetchFilesNotInLocalFilesystem(String cmd) { } private TableDesc getTableDescFromSerDe(ASTNode child, String cols, - String colTypes, boolean defaultCols) throws SemanticException { + String colTypes, boolean defaultCols) throws HiveException { if (child.getType() == HiveParser.TOK_SERDENAME) { String serdeName = unescapeSQLString(child.getChild(0).getText()); Class serdeClass = null; @@ -2253,7 +2247,7 @@ private TableDesc getTableDescFromSerDe(ASTNode child, String cols, serdeClass = (Class) Class.forName(serdeName, true, JavaUtils.getClassLoader()); } catch (ClassNotFoundException e) { - throw new SemanticException(e); + throw HiveException.wrap(e); } TableDesc tblDesc = PlanUtils.getTableDesc(serdeClass, Integer @@ -2324,7 +2318,7 @@ private TableDesc getTableDescFromSerDe(ASTNode child, String cols, } private void failIfColAliasExists(Set nameSet, String name) - throws SemanticException { + throws HiveException { if (nameSet.contains(name)) { throw new SemanticException(ErrorMsg.COLUMN_ALIAS_ALREADY_EXISTS .getMsg(name)); @@ -2334,7 +2328,7 @@ private void failIfColAliasExists(Set nameSet, String name) @SuppressWarnings("nls") private Operator 
genScriptPlan(ASTNode trfm, QB qb, Operator input) - throws SemanticException { + throws HiveException { // If there is no "AS" clause, the output schema will be "key,value" ArrayList outputCols = new ArrayList(); int inputSerDeNum = 1, inputRecordWriterNum = 2; @@ -2441,7 +2435,7 @@ private Operator genScriptPlan(ASTNode trfm, QB qb, Operator input) serde = (Class) Class.forName(defaultSerdeName, true, JavaUtils.getClassLoader()); } catch (ClassNotFoundException e) { - throw new SemanticException(e); + throw HiveException.wrap(e); } int fieldSeparator = Utilities.tabCode; @@ -2494,7 +2488,7 @@ private Operator genScriptPlan(ASTNode trfm, QB qb, Operator input) } private Class getRecordReader(ASTNode node) - throws SemanticException { + throws HiveException { String name; if (node.getChildCount() == 0) { @@ -2507,12 +2501,12 @@ private Operator genScriptPlan(ASTNode trfm, QB qb, Operator input) return (Class) Class.forName(name, true, JavaUtils.getClassLoader()); } catch (ClassNotFoundException e) { - throw new SemanticException(e); + throw HiveException.wrap(e); } } private Class getDefaultRecordReader() - throws SemanticException { + throws HiveException { String name; name = conf.getVar(HiveConf.ConfVars.HIVESCRIPTRECORDREADER); @@ -2521,12 +2515,12 @@ private Operator genScriptPlan(ASTNode trfm, QB qb, Operator input) return (Class) Class.forName(name, true, JavaUtils.getClassLoader()); } catch (ClassNotFoundException e) { - throw new SemanticException(e); + throw HiveException.wrap(e); } } private Class getRecordWriter(ASTNode node) - throws SemanticException { + throws HiveException { String name; if (node.getChildCount() == 0) { @@ -2539,7 +2533,7 @@ private Operator genScriptPlan(ASTNode trfm, QB qb, Operator input) return (Class) Class.forName(name, true, JavaUtils.getClassLoader()); } catch (ClassNotFoundException e) { - throw new SemanticException(e); + throw HiveException.wrap(e); } } @@ -2564,7 +2558,7 @@ private Operator genScriptPlan(ASTNode trfm, QB qb, Operator input) // Even if rollups and cubes are present in the query, they are converted to // grouping sets at this point private ObjectPair, List> getGroupByGroupingSetsForClause( - QBParseInfo parseInfo, String dest) throws SemanticException { + QBParseInfo parseInfo, String dest) throws HiveException { List groupingSets = new ArrayList(); List groupByExprs = getGroupByForClause(parseInfo, dest); if (parseInfo.getDestRollups().contains(dest)) { @@ -2579,7 +2573,7 @@ private Operator genScriptPlan(ASTNode trfm, QB qb, Operator input) } private List getGroupingSets(List groupByExpr, QBParseInfo parseInfo, - String dest) throws SemanticException { + String dest) throws HiveException { Map exprPos = new HashMap(); for (int i = 0; i < groupByExpr.size(); ++i) { ASTNode node = groupByExpr.get(i); @@ -2764,7 +2758,7 @@ private static boolean isRegex(String pattern, HiveConf conf) { private Operator genSelectPlan(String dest, QB qb, Operator input) - throws SemanticException { + throws HiveException { ASTNode selExprList = qb.getParseInfo().getSelForClause(dest); Operator op = genSelectPlan(selExprList, qb, input, false); @@ -2778,7 +2772,7 @@ private static boolean isRegex(String pattern, HiveConf conf) { @SuppressWarnings("nls") private Operator genSelectPlan(ASTNode selExprList, QB qb, - Operator input, boolean outerLV) throws SemanticException { + Operator input, boolean outerLV) throws HiveException { if (LOG.isDebugEnabled()) { LOG.debug("tree: " + selExprList.toStringTree()); @@ -3092,7 +3086,7 @@ private String 
recommendName(ExprNodeDesc exp, String colAlias) { static GenericUDAFEvaluator getGenericUDAFEvaluator(String aggName, ArrayList aggParameters, ASTNode aggTree, boolean isDistinct, boolean isAllColumns) - throws SemanticException { + throws HiveException { ArrayList originalParameterTypeInfos = getWritableObjectInspector(aggParameters); GenericUDAFEvaluator result = FunctionRegistry.getGenericUDAFEvaluator( @@ -3116,12 +3110,12 @@ static GenericUDAFEvaluator getGenericUDAFEvaluator(String aggName, * @param aggTree * The ASTNode node of the UDAF in the query. * @return GenericUDAFInfo - * @throws SemanticException + * @throws HiveException * when the UDAF is not found or has problems. */ static GenericUDAFInfo getGenericUDAFInfo(GenericUDAFEvaluator evaluator, GenericUDAFEvaluator.Mode emode, ArrayList aggParameters) - throws SemanticException { + throws HiveException { GenericUDAFInfo r = new GenericUDAFInfo(); @@ -3129,18 +3123,13 @@ static GenericUDAFInfo getGenericUDAFInfo(GenericUDAFEvaluator evaluator, r.genericUDAFEvaluator = evaluator; // set r.returnType - ObjectInspector returnOI = null; - try { - ArrayList aggOIs = getWritableObjectInspector(aggParameters); - ObjectInspector[] aggOIArray = new ObjectInspector[aggOIs.size()]; - for (int ii = 0; ii < aggOIs.size(); ++ii) { - aggOIArray[ii] = aggOIs.get(ii); - } - returnOI = r.genericUDAFEvaluator.init(emode, aggOIArray); - r.returnType = TypeInfoUtils.getTypeInfoFromObjectInspector(returnOI); - } catch (HiveException e) { - throw new SemanticException(e); + ArrayList aggOIs = getWritableObjectInspector(aggParameters); + ObjectInspector[] aggOIArray = new ObjectInspector[aggOIs.size()]; + for (int ii = 0; ii < aggOIs.size(); ++ii) { + aggOIArray[ii] = aggOIs.get(ii); } + ObjectInspector returnOI = r.genericUDAFEvaluator.init(emode, aggOIArray); + r.returnType = TypeInfoUtils.getTypeInfoFromObjectInspector(returnOI); // set r.convertedParameters // TODO: type conversion r.convertedParameters = aggParameters; @@ -3232,7 +3221,7 @@ private ExprNodeDesc isConstantParameterInAggregationParameters(String internalN private Operator genGroupByPlanGroupByOperator(QBParseInfo parseInfo, String dest, Operator input, ReduceSinkOperator rs, GroupByDesc.Mode mode, Map genericUDAFEvaluators) - throws SemanticException { + throws HiveException { RowResolver groupByInputRowResolver = opParseCtx .get(input).getRowResolver(); RowResolver groupByOutputRowResolver = new RowResolver(); @@ -3367,7 +3356,7 @@ private void addGroupingSetKey(List groupByKeys, RowResolver groupByInputRowResolver, RowResolver groupByOutputRowResolver, List outputColumnNames, - Map colExprMap) throws SemanticException { + Map colExprMap) throws HiveException { // For grouping sets, add a dummy grouping key String groupingSetColumnName = groupByInputRowResolver.get(null, VirtualColumn.GROUPINGID.getName()).getInternalName(); @@ -3398,7 +3387,7 @@ private void processGroupingSetReduceSinkOperator(RowResolver reduceSinkInputRow RowResolver reduceSinkOutputRowResolver, List reduceKeys, List outputKeyColumnNames, - Map colExprMap) throws SemanticException { + Map colExprMap) throws HiveException { // add a key for reduce sink String groupingSetColumnName = reduceSinkInputRowResolver.get(null, VirtualColumn.GROUPINGID.getName()).getInternalName(); @@ -3442,7 +3431,7 @@ private Operator genGroupByPlanGroupByOperator1(QBParseInfo parseInfo, boolean distPartAgg, List groupingSets, boolean groupingSetsPresent, - boolean groupingSetsNeedAdditionalMRJob) throws SemanticException { + 
boolean groupingSetsNeedAdditionalMRJob) throws HiveException { ArrayList outputColumnNames = new ArrayList(); RowResolver groupByInputRowResolver = opParseCtx .get(reduceSinkOperatorInfo).getRowResolver(); @@ -3682,7 +3671,7 @@ private Operator genGroupByPlanMapGroupByOperator(QB qb, GroupByDesc.Mode mode, Map genericUDAFEvaluators, List groupingSetKeys, - boolean groupingSetsPresent) throws SemanticException { + boolean groupingSetsPresent) throws HiveException { RowResolver groupByInputRowResolver = opParseCtx.get(inputOperatorInfo) .getRowResolver(); @@ -3816,7 +3805,7 @@ private Operator genGroupByPlanMapGroupByOperator(QB qb, * the number of fields for map-reduce partitioning. This is usually * the number of fields in the Group By keys. * @return the new ReduceSinkOperator. - * @throws SemanticException + * @throws HiveException */ @SuppressWarnings("nls") private ReduceSinkOperator genGroupByPlanReduceSinkOperator(QB qb, @@ -3827,7 +3816,7 @@ private ReduceSinkOperator genGroupByPlanReduceSinkOperator(QB qb, boolean changeNumPartitionFields, int numReducers, boolean mapAggrDone, - boolean groupingSetsPresent) throws SemanticException { + boolean groupingSetsPresent) throws HiveException { RowResolver reduceSinkInputRowResolver = opParseCtx.get(inputOperatorInfo) .getRowResolver(); @@ -3907,7 +3896,7 @@ private ReduceSinkOperator genGroupByPlanReduceSinkOperator(QB qb, private ArrayList getReduceKeysForReduceSink(List grpByExprs, String dest, RowResolver reduceSinkInputRowResolver, RowResolver reduceSinkOutputRowResolver, List outputKeyColumnNames, Map colExprMap) - throws SemanticException { + throws HiveException { ArrayList reduceKeys = new ArrayList(); @@ -3938,7 +3927,7 @@ private ReduceSinkOperator genGroupByPlanReduceSinkOperator(QB qb, List reduceKeys, RowResolver reduceSinkInputRowResolver, RowResolver reduceSinkOutputRowResolver, List outputKeyColumnNames, Map colExprMap) - throws SemanticException { + throws HiveException { List> distinctColIndices = new ArrayList>(); @@ -3988,7 +3977,7 @@ private ReduceSinkOperator genGroupByPlanReduceSinkOperator(QB qb, private void getReduceValuesForReduceSinkNoMapAgg(QBParseInfo parseInfo, String dest, RowResolver reduceSinkInputRowResolver, RowResolver reduceSinkOutputRowResolver, List outputValueColumnNames, ArrayList reduceValues, - Map colExprMap) throws SemanticException { + Map colExprMap) throws HiveException { HashMap aggregationTrees = parseInfo .getAggregationExprsForClause(dest); @@ -4016,7 +4005,7 @@ private void getReduceValuesForReduceSinkNoMapAgg(QBParseInfo parseInfo, String @SuppressWarnings("nls") private ReduceSinkOperator genCommonGroupByPlanReduceSinkOperator(QB qb, List dests, - Operator inputOperatorInfo) throws SemanticException { + Operator inputOperatorInfo) throws HiveException { RowResolver reduceSinkInputRowResolver = opParseCtx.get(inputOperatorInfo) .getRowResolver(); @@ -4130,7 +4119,7 @@ private void removeRecursively(ASTNode current, Map mappi * able to remove this parameter since in this phase there is no * distinct any more. * @return the new ReduceSinkOperator. 
- * @throws SemanticException + * @throws HiveException */ @SuppressWarnings("nls") private Operator genGroupByPlanReduceSinkOperator2MR(QBParseInfo parseInfo, @@ -4138,7 +4127,7 @@ private Operator genGroupByPlanReduceSinkOperator2MR(QBParseInfo parseInfo, Operator groupByOperatorInfo, int numPartitionFields, int numReducers, - boolean groupingSetsPresent) throws SemanticException { + boolean groupingSetsPresent) throws HiveException { RowResolver reduceSinkInputRowResolver2 = opParseCtx.get( groupByOperatorInfo).getRowResolver(); RowResolver reduceSinkOutputRowResolver2 = new RowResolver(); @@ -4218,7 +4207,7 @@ private Operator genGroupByPlanReduceSinkOperator2MR(QBParseInfo parseInfo, * The mapping from Aggregation StringTree to the * genericUDAFEvaluator. * @return the new GroupByOperator - * @throws SemanticException + * @throws HiveException */ @SuppressWarnings("nls") private Operator genGroupByPlanGroupByOperator2MR(QBParseInfo parseInfo, @@ -4226,7 +4215,7 @@ private Operator genGroupByPlanGroupByOperator2MR(QBParseInfo parseInfo, Operator reduceSinkOperatorInfo2, GroupByDesc.Mode mode, Map genericUDAFEvaluators, - boolean groupingSetsPresent) throws SemanticException { + boolean groupingSetsPresent) throws HiveException { RowResolver groupByInputRowResolver2 = opParseCtx.get( reduceSinkOperatorInfo2).getRowResolver(); @@ -4333,7 +4322,7 @@ private Operator genGroupByPlanGroupByOperator2MR(QBParseInfo parseInfo, * @param qb * @param input * @return - * @throws SemanticException + * @throws HiveException * * Generate a Group-By plan using 1 map-reduce job. Spray by the * group by key, and sort by the distinct key (if any), and compute @@ -4347,7 +4336,7 @@ private Operator genGroupByPlanGroupByOperator2MR(QBParseInfo parseInfo, **/ @SuppressWarnings({"nls"}) private Operator genGroupByPlan1MR(String dest, QB qb, Operator input) - throws SemanticException { + throws HiveException { QBParseInfo parseInfo = qb.getParseInfo(); @@ -4389,7 +4378,7 @@ private Operator genGroupByPlan1MR(String dest, QB qb, Operator input) @SuppressWarnings({"nls"}) private Operator genGroupByPlan1ReduceMultiGBY(List dests, QB qb, Operator input, Map aliasToOpInfo) - throws SemanticException { + throws HiveException { QBParseInfo parseInfo = qb.getParseInfo(); @@ -4498,7 +4487,7 @@ private Operator genGroupByPlan1ReduceMultiGBY(List dests, QB qb, Operat * @param qb * @param input * @return - * @throws SemanticException + * @throws HiveException * * Generate a Group-By plan using a 2 map-reduce jobs. Spray by the * distinct key in hope of getting a uniform distribution, and @@ -4521,7 +4510,7 @@ private Operator genGroupByPlan1ReduceMultiGBY(List dests, QB qb, Operat */ @SuppressWarnings("nls") private Operator genGroupByPlan2MRMultiGroupBy(String dest, QB qb, - Operator input) throws SemanticException { + Operator input) throws HiveException { // ////// Generate GroupbyOperator for a map-side partial aggregation Map genericUDAFEvaluators = @@ -4564,7 +4553,7 @@ private Operator genGroupByPlan2MRMultiGroupBy(String dest, QB qb, * @param qb * @param input * @return - * @throws SemanticException + * @throws HiveException * * Generate a Group-By plan using a 2 map-reduce jobs. 
Spray by the * grouping key and distinct key (or a random number, if no distinct @@ -4592,7 +4581,7 @@ private Operator genGroupByPlan2MRMultiGroupBy(String dest, QB qb, */ @SuppressWarnings("nls") private Operator genGroupByPlan2MR(String dest, QB qb, Operator input) - throws SemanticException { + throws HiveException { QBParseInfo parseInfo = qb.getParseInfo(); @@ -4665,7 +4654,7 @@ private boolean optimizeMapAggrGroupBy(String dest, QB qb) { } static private void extractColumns(Set colNamesExprs, - ExprNodeDesc exprNode) throws SemanticException { + ExprNodeDesc exprNode) throws HiveException { if (exprNode instanceof ExprNodeColumnDesc) { colNamesExprs.add(((ExprNodeColumnDesc) exprNode).getColumn()); return; @@ -4692,7 +4681,7 @@ static private boolean hasCommonElement(Set set1, Set set2) { private void checkExpressionsForGroupingSet(List grpByExprs, List distinctGrpByExprs, Map aggregationTrees, - RowResolver inputRowResolver) throws SemanticException { + RowResolver inputRowResolver) throws HiveException { Set colNamesGroupByExprs = new HashSet(); Set colNamesGroupByDistinctExprs = new HashSet(); @@ -4789,7 +4778,7 @@ private void checkExpressionsForGroupingSet(List grpByExprs, */ @SuppressWarnings("nls") private Operator genGroupByPlanMapAggrNoSkew(String dest, QB qb, - Operator inputOperatorInfo) throws SemanticException { + Operator inputOperatorInfo) throws HiveException { QBParseInfo parseInfo = qb.getParseInfo(); ObjectPair, List> grpByExprsGroupingSets = @@ -4957,7 +4946,7 @@ private Operator genGroupByPlanMapAggrNoSkew(String dest, QB qb, */ @SuppressWarnings("nls") private Operator genGroupByPlanMapAggr2MR(String dest, QB qb, - Operator inputOperatorInfo) throws SemanticException { + Operator inputOperatorInfo) throws HiveException { QBParseInfo parseInfo = qb.getParseInfo(); @@ -5057,7 +5046,7 @@ private Operator genGroupByPlanMapAggr2MR(String dest, QB qb, @SuppressWarnings("nls") private Operator genConversionOps(String dest, QB qb, Operator input) - throws SemanticException { + throws HiveException { Integer dest_type = qb.getMetaData().getDestTypeForAlias(dest); switch (dest_type.intValue()) { @@ -5163,7 +5152,7 @@ public void setTotalFiles(int totalFiles) { @SuppressWarnings("nls") private Operator genBucketingSortingDest(String dest, Operator input, QB qb, - TableDesc table_desc, Table dest_tab, SortBucketRSCtx ctx) throws SemanticException { + TableDesc table_desc, Table dest_tab, SortBucketRSCtx ctx) throws HiveException { // If the table is bucketed, and bucketing is enforced, do the following: // If the number of buckets is smaller than the number of maximum reducers, @@ -5255,7 +5244,7 @@ private boolean checkHoldDDLTime(QB qb) { @SuppressWarnings("nls") private Operator genFileSinkPlan(String dest, QB qb, Operator input) - throws SemanticException { + throws HiveException { RowResolver inputRR = opParseCtx.get(input).getRowResolver(); QBMetaData qbm = qb.getMetaData(); @@ -5443,15 +5432,11 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input) ltd.setLbCtx(lbCtx); if (holdDDLTime) { - try { - Partition part = db.getPartition(dest_tab, dest_part.getSpec(), false); - if (part == null) { - throw new SemanticException(generateErrorMessage( - qb.getParseInfo().getDestForClause(dest), - ErrorMsg.HOLD_DDLTIME_ON_NONEXIST_PARTITIONS.getMsg())); - } - } catch (HiveException e) { - throw new SemanticException(e); + Partition part = db.getPartition(dest_tab, dest_part.getSpec(), false); + if (part == null) { + throw new 
SemanticException(generateErrorMessage( + qb.getParseInfo().getDestForClause(dest), + ErrorMsg.HOLD_DDLTIME_ON_NONEXIST_PARTITIONS.getMsg())); } LOG.info("this query will not update transient_lastDdlTime!"); ltd.setHoldDDLTime(true); @@ -5644,7 +5629,7 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input) String staticSpec = Warehouse.makePartPath(dest_part.getSpec()); fileSinkDesc.setStaticSpec(staticSpec); } catch (MetaException e) { - throw new SemanticException(e); + throw HiveException.wrap(e); } } else if (dpCtx != null) { fileSinkDesc.setStaticSpec(dpCtx.getSPPath()); @@ -5672,7 +5657,7 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input) * types that are expected by the table_desc. */ Operator genConversionSelectOperator(String dest, QB qb, Operator input, - TableDesc table_desc, DynamicPartitionCtx dpCtx) throws SemanticException { + TableDesc table_desc, DynamicPartitionCtx dpCtx) throws HiveException { StructObjectInspector oi = null; try { Deserializer deserializer = table_desc.getDeserializerClass() @@ -5787,7 +5772,7 @@ Operator genConversionSelectOperator(String dest, QB qb, Operator input, @SuppressWarnings("nls") private Operator genLimitPlan(String dest, QB qb, Operator input, int limit) - throws SemanticException { + throws HiveException { // A map-only job can be optimized - instead of converting it to a // map-reduce job, we can have another map // job to do the same to avoid the cost of sorting in the map-reduce phase. @@ -5814,7 +5799,7 @@ private Operator genLimitPlan(String dest, QB qb, Operator input, int limit) private Operator genUDTFPlan(GenericUDTF genericUDTF, String outputTableAlias, ArrayList colAliases, QB qb, - Operator input, boolean outerLV) throws SemanticException { + Operator input, boolean outerLV) throws HiveException { // No GROUP BY / DISTRIBUTE BY / SORT BY / CLUSTER BY QBParseInfo qbp = qb.getParseInfo(); @@ -5907,7 +5892,7 @@ private Operator genUDTFPlan(GenericUDTF genericUDTF, @SuppressWarnings("nls") private Operator genLimitMapRedPlan(String dest, QB qb, Operator input, - int limit, boolean extraMRStep) throws SemanticException { + int limit, boolean extraMRStep) throws HiveException { // A map-only job can be optimized - instead of converting it to a // map-reduce job, we can have another map // job to do the same to avoid the cost of sorting in the map-reduce phase. 
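
Beyond the overrides, every direct caller of these widened SemanticAnalyzer helpers is affected: catching SemanticException alone no longer satisfies the compiler once the callee declares throws HiveException, so call sites must catch or declare the broader type, while SemanticException stays catchable as a subclass. A stand-in sketch of the caller-side change, assuming a compile-path caller that is not part of these hunks:

class HiveException extends Exception {}
class SemanticException extends HiveException {}

class CompileSketch {
  void genFileSinkPlan() throws HiveException {}

  void analyze() {
    try {
      genFileSinkPlan();
    } catch (SemanticException e) {
      // still reachable: SemanticException remains a subclass of HiveException
    } catch (HiveException e) {
      // newly required: the broader checked type must be handled or declared
    }
  }
}
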
@@ -5928,7 +5913,7 @@ private Operator genLimitMapRedPlan(String dest, QB qb, Operator input, private ArrayList getParitionColsFromBucketCols(String dest, QB qb, Table tab, TableDesc table_desc, Operator input, boolean convert) - throws SemanticException { + throws HiveException { List tabBucketCols = tab.getBucketCols(); List tabCols = tab.getCols(); @@ -5951,7 +5936,7 @@ private Operator genLimitMapRedPlan(String dest, QB qb, Operator input, private ArrayList genConvertCol(String dest, QB qb, Table tab, TableDesc table_desc, Operator input, List posns, boolean convert) - throws SemanticException { + throws HiveException { StructObjectInspector oi = null; try { Deserializer deserializer = table_desc.getDeserializerClass() @@ -5959,7 +5944,7 @@ private Operator genLimitMapRedPlan(String dest, QB qb, Operator input, deserializer.initialize(conf, table_desc.getProperties()); oi = (StructObjectInspector) deserializer.getObjectInspector(); } catch (Exception e) { - throw new SemanticException(e); + throw HiveException.wrap(e); } List tableFields = oi.getAllStructFieldRefs(); @@ -6001,7 +5986,7 @@ private Operator genLimitMapRedPlan(String dest, QB qb, Operator input, private ArrayList getSortCols(String dest, QB qb, Table tab, TableDesc table_desc, Operator input, boolean convert) - throws SemanticException { + throws HiveException { RowResolver inputRR = opParseCtx.get(input).getRowResolver(); List tabSortCols = tab.getSortCols(); List tabCols = tab.getCols(); @@ -6024,7 +6009,7 @@ private Operator genLimitMapRedPlan(String dest, QB qb, Operator input, } private ArrayList getSortOrders(String dest, QB qb, Table tab, Operator input) - throws SemanticException { + throws HiveException { RowResolver inputRR = opParseCtx.get(input).getRowResolver(); List tabSortCols = tab.getSortCols(); List tabCols = tab.getCols(); @@ -6047,7 +6032,7 @@ private Operator genReduceSinkPlanForSortingBucketing(Table tab, Operator input, List sortOrders, ArrayList partitionCols, int numReducers) - throws SemanticException { + throws HiveException { RowResolver inputRR = opParseCtx.get(input).getRowResolver(); // For the generation of the values expression just get the inputs @@ -6107,7 +6092,7 @@ private Operator genReduceSinkPlanForSortingBucketing(Table tab, Operator input, @SuppressWarnings("nls") private Operator genReduceSinkPlan(String dest, QB qb, Operator input, - int numReducers) throws SemanticException { + int numReducers) throws HiveException { RowResolver inputRR = opParseCtx.get(input).getRowResolver(); @@ -6218,7 +6203,7 @@ private Operator genReduceSinkPlan(String dest, QB qb, Operator input, } private Operator genJoinOperatorChildren(QBJoinTree join, Operator left, - Operator[] right, HashSet omitOpts) throws SemanticException { + Operator[] right, HashSet omitOpts) throws HiveException { RowResolver outputRS = new RowResolver(); ArrayList outputColumnNames = new ArrayList(); @@ -6316,7 +6301,7 @@ private Operator genJoinOperatorChildren(QBJoinTree join, Operator left, @SuppressWarnings("nls") private Operator genJoinReduceSinkChild(QB qb, QBJoinTree joinTree, - Operator child, String srcName, int pos) throws SemanticException { + Operator child, String srcName, int pos) throws HiveException { RowResolver inputRS = opParseCtx.get(child).getRowResolver(); RowResolver outputRS = new RowResolver(); ArrayList outputColumns = new ArrayList(); @@ -6382,7 +6367,7 @@ private Operator genJoinReduceSinkChild(QB qb, QBJoinTree joinTree, private Operator genJoinOperator(QB qb, QBJoinTree joinTree, Map map, - 
Operator joiningOp) throws SemanticException { + Operator joiningOp) throws HiveException { QBJoinTree leftChild = joinTree.getJoinSrc(); Operator joinSrcOp = joiningOp instanceof JoinOperator ? joiningOp : null; @@ -6458,10 +6443,10 @@ private Operator genJoinOperator(QB qb, QBJoinTree joinTree, * @param input * input operator * @return the selection operator. - * @throws SemanticException + * @throws HiveException */ private Operator insertSelectForSemijoin(ArrayList fields, - Operator input) throws SemanticException { + Operator input) throws HiveException { RowResolver inputRR = opParseCtx.get(input).getRowResolver(); ArrayList colList = new ArrayList(); @@ -6492,7 +6477,7 @@ private Operator genMapGroupByForSemijoin(QB qb, ArrayList fields, // t // key // "tab.col" Operator inputOperatorInfo, GroupByDesc.Mode mode) - throws SemanticException { + throws HiveException { RowResolver groupByInputRowResolver = opParseCtx.get(inputOperatorInfo) .getRowResolver(); @@ -6540,7 +6525,7 @@ private Operator genMapGroupByForSemijoin(QB qb, ArrayList fields, // t } private void genJoinOperatorTypeCheck(Operator left, Operator[] right) - throws SemanticException { + throws HiveException { // keys[i] -> ArrayList for the i-th join operator key list ArrayList> keys = new ArrayList>(); int keyLength = 0; @@ -6593,7 +6578,7 @@ private void genJoinOperatorTypeCheck(Operator left, Operator[] right) } private Operator genJoinPlan(QB qb, Map map) - throws SemanticException { + throws HiveException { QBJoinTree joinTree = qb.getQbJoinTree(); Operator joinOp = genJoinOperator(qb, joinTree, map, null); return joinOp; @@ -6604,7 +6589,7 @@ private Operator genJoinPlan(QB qb, Map map) * source operators. This procedure traverses the query tree recursively, */ private void pushJoinFilters(QB qb, QBJoinTree joinTree, - Map map) throws SemanticException { + Map map) throws HiveException { pushJoinFilters(qb, joinTree, map, true); } @@ -6614,7 +6599,7 @@ private void pushJoinFilters(QB qb, QBJoinTree joinTree, */ private void pushJoinFilters(QB qb, QBJoinTree joinTree, Map map, - boolean recursively) throws SemanticException { + boolean recursively) throws HiveException { if ( recursively ) { if (joinTree.getJoinSrc() != null) { pushJoinFilters(qb, joinTree.getJoinSrc(), map); @@ -6674,7 +6659,7 @@ private String getModifiedAlias(QB qb, String alias) { private QBJoinTree genUniqueJoinTree(QB qb, ASTNode joinParseTree, Map aliasToOpInfo) - throws SemanticException { + throws HiveException { QBJoinTree joinTree = new QBJoinTree(); joinTree.setNoOuterJoin(false); @@ -6799,7 +6784,7 @@ private QBJoinTree genUniqueJoinTree(QB qb, ASTNode joinParseTree, private QBJoinTree genSQJoinTree(QB qb, ISubQueryJoinInfo subQuery, Operator joiningOp, Map aliasToOpInfo) - throws SemanticException { + throws HiveException { QBJoinTree joinTree = new QBJoinTree(); JoinCond[] condn = new JoinCond[1]; @@ -6902,7 +6887,7 @@ private QBJoinTree genSQJoinTree(QB qb, ISubQueryJoinInfo subQuery, private QBJoinTree genJoinTree(QB qb, ASTNode joinParseTree, Map aliasToOpInfo) - throws SemanticException { + throws HiveException { QBJoinTree joinTree = new QBJoinTree(); JoinCond[] condn = new JoinCond[1]; @@ -7379,7 +7364,7 @@ private JoinType getType(JoinCond[] conds) { } private Operator insertSelectAllPlanForGroupBy(Operator input) - throws SemanticException { + throws HiveException { OpParseContext inputCtx = opParseCtx.get(input); RowResolver inputRR = inputCtx.getRowResolver(); ArrayList columns = inputRR.getColumnInfos(); @@ -7445,7 +7430,7 
+7430,7 @@ private Operator insertSelectAllPlanForGroupBy(Operator input) List currDestList; try { currDestList = getDistinctExprs(qbp, dest, inputRR); - } catch (SemanticException e) { + } catch (HiveException e) { return null; } @@ -7471,7 +7456,7 @@ private Operator insertSelectAllPlanForGroupBy(Operator input) } private Operator createCommonReduceSink(QB qb, Operator input) - throws SemanticException { + throws HiveException { // Go over all the tables and extract the common distinct key List distExprs = getCommonDistinctExprs(qb, input); @@ -7566,7 +7551,7 @@ private Operator createCommonReduceSink(QB qb, Operator input) // group by and distinct keys and no clause appears in more than one list. Returns a list of the // lists of clauses. private List> getCommonGroupByDestGroups(QB qb, - Map> inputs) throws SemanticException { + Map> inputs) throws HiveException { QBParseInfo qbp = qb.getParseInfo(); @@ -7697,7 +7682,7 @@ private boolean matchExprLists(List list1, List list // Returns a list of the distinct exprs without duplicates for a given clause name private List getDistinctExprs(QBParseInfo qbp, String dest, RowResolver inputRR) - throws SemanticException { + throws HiveException { List distinctAggExprs = qbp.getDistinctFuncExprsForClause(dest); List distinctExprs = new ArrayList(); @@ -7734,7 +7719,7 @@ private boolean distinctExprsExists(QB qb) { @SuppressWarnings("nls") private Operator genBodyPlan(QB qb, Operator input, Map aliasToOpInfo) - throws SemanticException { + throws HiveException { QBParseInfo qbp = qb.getParseInfo(); TreeSet ks = new TreeSet(qbp.getClauseNames()); @@ -7792,7 +7777,7 @@ private Operator genBodyPlan(QB qb, Operator input, Map aliasT if (conf.getBoolVar(HiveConf.ConfVars.HIVEMULTIGROUPBYSINGLEREDUCER)) { try { commonGroupByDestGroups = getCommonGroupByDestGroups(qb, inputs); - } catch (SemanticException e) { + } catch (HiveException e) { LOG.error("Failed to group clauses by common spray keys.", e); } } @@ -7875,7 +7860,7 @@ private Operator genBodyPlan(QB qb, Operator input, Map aliasT } private Map> createInputForDests(QB qb, - Operator input, Set dests) throws SemanticException { + Operator input, Set dests) throws HiveException { Map> inputs = new HashMap>(); for (String dest : dests) { @@ -7886,7 +7871,7 @@ private Operator genBodyPlan(QB qb, Operator input, Map aliasT private Operator genPostGroupByBodyPlan(Operator curr, String dest, QB qb, Map aliasToOpInfo) - throws SemanticException { + throws HiveException { QBParseInfo qbp = qb.getParseInfo(); @@ -7995,7 +7980,7 @@ private Operator genPostGroupByBodyPlan(Operator curr, String dest, QB qb, @SuppressWarnings("nls") private Operator genUnionPlan(String unionalias, String leftalias, Operator leftOp, String rightalias, Operator rightOp) - throws SemanticException { + throws HiveException { // Currently, the unions are not merged - each union has only 2 parents. So, // a n-way union will lead to (n-1) union operators. @@ -8142,12 +8127,12 @@ private Operator genUnionPlan(String unionalias, String leftalias, * @param unionalias * The alias of the union. 
* @return - * @throws SemanticException + * @throws HiveException */ private Operator genInputSelectForUnion( Operator origInputOp, Map origInputFieldMap, String origInputAlias, RowResolver unionoutRR, String unionalias) - throws SemanticException { + throws HiveException { List columns = new ArrayList(); boolean needsCast = false; @@ -8223,7 +8208,7 @@ private Operator genUnionPlan(String unionalias, String leftalias, private ExprNodeDesc genSamplePredicate(TableSample ts, List bucketCols, boolean useBucketCols, String alias, RowResolver rwsch, QBMetaData qbm, ExprNodeDesc planExpr) - throws SemanticException { + throws HiveException { ExprNodeDesc numeratorExpr = new ExprNodeConstantDesc( TypeInfoFactory.intTypeInfo, Integer.valueOf(ts.getNumerator() - 1)); @@ -8278,7 +8263,7 @@ private String getAliasId(String alias, QB qb) { } @SuppressWarnings("nls") - private Operator genTablePlan(String alias, QB qb) throws SemanticException { + private Operator genTablePlan(String alias, QB qb) throws HiveException { String alias_id = getAliasId(alias, qb); Table tab = qb.getMetaData().getSrcForAlias(alias); @@ -8512,7 +8497,7 @@ private boolean isSkewedCol(String alias, QB qb, String colName) { private void setupStats(TableScanDesc tsDesc, QBParseInfo qbp, Table tab, String alias, RowResolver rwsch) - throws SemanticException { + throws HiveException { if (!qbp.isAnalyzeCommand()) { tsDesc.setGatherStats(false); @@ -8570,7 +8555,7 @@ private void setupStats(TableScanDesc tsDesc, QBParseInfo qbp, Table tab, String } } - private Operator genPlan(QBExpr qbexpr) throws SemanticException { + private Operator genPlan(QBExpr qbexpr) throws HiveException { if (qbexpr.getOpcode() == QBExpr.Opcode.NULLOP) { return genPlan(qbexpr.getQB()); } @@ -8585,7 +8570,7 @@ private Operator genPlan(QBExpr qbexpr) throws SemanticException { } @SuppressWarnings("nls") - public Operator genPlan(QB qb) throws SemanticException { + public Operator genPlan(QB qb) throws HiveException { // First generate all the opInfos for the elements in the from clause Map aliasToOpInfo = new HashMap(); @@ -8692,11 +8677,11 @@ public Operator genPlan(QB qb) throws SemanticException { * A mapping from a table alias to the TS operator. 
This function * replaces the operator mapping as necessary * @param qb - * @throws SemanticException + * @throws HiveException */ void genLateralViewPlans(Map aliasToOpInfo, QB qb) - throws SemanticException { + throws HiveException { Map> aliasToLateralViews = qb.getParseInfo() .getAliasToLateralViews(); for (Entry e : aliasToOpInfo.entrySet()) { @@ -8724,7 +8709,7 @@ void genLateralViewPlans(Map aliasToOpInfo, QB qb) } private Operator genLateralViewPlanForDest(String dest, QB qb, Operator op) - throws SemanticException { + throws HiveException { ASTNode lateralViewTree = qb.getParseInfo().getDestToLateralView().get(dest); if (lateralViewTree != null) { return genLateralViewPlan(qb, op, lateralViewTree); @@ -8733,7 +8718,7 @@ private Operator genLateralViewPlanForDest(String dest, QB qb, Operator op) } private Operator genLateralViewPlan(QB qb, Operator op, ASTNode lateralViewTree) - throws SemanticException { + throws HiveException { RowResolver lvForwardRR = new RowResolver(); RowResolver source = opParseCtx.get(op).getRowResolver(); for (ColumnInfo col : source.getColumnInfos()) { @@ -8851,7 +8836,7 @@ public void init() { @Override @SuppressWarnings("nls") - public void analyzeInternal(ASTNode ast) throws SemanticException { + public void analyzeInternal(ASTNode ast) throws HiveException { ASTNode child = ast; this.ast = ast; viewsExpanded = new ArrayList(); @@ -8975,7 +8960,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { return resultSchema; } - private void saveViewDefinition() throws SemanticException { + private void saveViewDefinition() throws HiveException { // Make a copy of the statement's result schema, since we may // modify it below as part of imposing view column names. @@ -9102,7 +9087,7 @@ private void saveViewDefinition() throws SemanticException { * Generates an expression node descriptor for the expression with TypeCheckCtx. */ public ExprNodeDesc genExprNodeDesc(ASTNode expr, RowResolver input) - throws SemanticException { + throws HiveException { // Since the user didn't supply a customized type-checking context, // use default settings. TypeCheckCtx tcCtx = new TypeCheckCtx(input); @@ -9114,7 +9099,7 @@ public ExprNodeDesc genExprNodeDesc(ASTNode expr, RowResolver input) * with default TypeCheckCtx. */ public Map genAllExprNodeDesc(ASTNode expr, RowResolver input) - throws SemanticException { + throws HiveException { TypeCheckCtx tcCtx = new TypeCheckCtx(input); return genAllExprNodeDesc(expr, input, tcCtx); } @@ -9124,7 +9109,7 @@ public ExprNodeDesc genExprNodeDesc(ASTNode expr, RowResolver input) * If it's evaluated already in previous operator, it can be retrieved from cache. */ public ExprNodeDesc genExprNodeDesc(ASTNode expr, RowResolver input, - TypeCheckCtx tcCtx) throws SemanticException { + TypeCheckCtx tcCtx) throws HiveException { // We recursively create the exprNodeDesc. Base cases: when we encounter // a column ref, we convert that into an exprNodeColumnDesc; when we // encounter @@ -9145,7 +9130,7 @@ public ExprNodeDesc genExprNodeDesc(ASTNode expr, RowResolver input, * Find ExprNodeDesc for the expression cached in the RowResolver. Returns null if not exists. 
*/ private ExprNodeDesc getExprNodeDescCached(ASTNode expr, RowResolver input) - throws SemanticException { + throws HiveException { ColumnInfo colInfo = input.getExpression(expr); if (colInfo != null) { ASTNode source = input.getExpressionSource(expr); @@ -9171,11 +9156,11 @@ private ExprNodeDesc getExprNodeDescCached(ASTNode expr, RowResolver input) * @param tcCtx * Customized type-checking context * @return expression to exprNodeDesc mapping - * @throws SemanticException Failed to evaluate expression + * @throws HiveException Failed to evaluate expression */ @SuppressWarnings("nls") public Map genAllExprNodeDesc(ASTNode expr, RowResolver input, - TypeCheckCtx tcCtx) throws SemanticException { + TypeCheckCtx tcCtx) throws HiveException { // Create the walker and the rules dispatcher. tcCtx.setUnparseTranslator(unparseTranslator); @@ -9222,7 +9207,7 @@ private ExprNodeDesc getExprNodeDescCached(ASTNode expr, RowResolver input) } @Override - public void validate() throws SemanticException { + public void validate() throws HiveException { LOG.debug("validation start"); // Validate inputs and outputs have right protectmode to execute the query for (ReadEntity readEntity : getInputs()) { @@ -9258,18 +9243,13 @@ public void validate() throws SemanticException { if (type == WriteEntity.Type.PARTITION || type == WriteEntity.Type.DUMMYPARTITION) { - String conflictingArchive; - try { - Partition usedp = writeEntity.getPartition(); - Table tbl = usedp.getTable(); + Partition usedp = writeEntity.getPartition(); + Table tbl = usedp.getTable(); - LOG.debug("validated " + usedp.getName()); - LOG.debug(usedp.getTable()); - conflictingArchive = ArchiveUtils - .conflictingArchiveNameOrNull(db, tbl, usedp.getSpec()); - } catch (HiveException e) { - throw new SemanticException(e); - } + LOG.debug("validated " + usedp.getName()); + LOG.debug(usedp.getTable()); + String conflictingArchive = ArchiveUtils + .conflictingArchiveNameOrNull(db, tbl, usedp.getSpec()); if (conflictingArchive != null) { String message = String.format("Insert conflict with existing archive: %s", conflictingArchive); @@ -9292,17 +9272,13 @@ public void validate() throws SemanticException { // If it is a partition, Partition's metastore is not fetched. We // need to fetch it. - try { - p = Hive.get().getPartition( - inputPartition.getTable(), inputPartition.getSpec(), false); - if (p != null) { - tbl = p.getTable(); - } else { - // if p is null, we assume that we insert to a new partition - tbl = inputPartition.getTable(); - } - } catch (HiveException e) { - throw new SemanticException(e); + p = Hive.get().getPartition( + inputPartition.getTable(), inputPartition.getSpec(), false); + if (p != null) { + tbl = p.getTable(); + } else { + // if p is null, we assume that we insert to a new partition + tbl = inputPartition.getTable(); } if (type == WriteEntity.Type.PARTITION && p != null && p.isOffline()) { @@ -9335,7 +9311,7 @@ public void validate() throws SemanticException { } private void validate(Task task, boolean reworkMapredWork) - throws SemanticException { + throws HiveException { Utilities.reworkMapRedWork(task, reworkMapredWork, conf); if (task.getChildTasks() == null) { return; @@ -9392,7 +9368,7 @@ public RowResolver getRowResolver(Operator opt) { * to the SerDe and Storage Format. 
*/ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) - throws SemanticException { + throws HiveException { String tableName = getUnescapedName((ASTNode) ast.getChild(0)); String likeTableName = null; List cols = new ArrayList(); @@ -9599,18 +9575,13 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) case CTAS: // create table as select // Verify that the table does not already exist - String databaseName; - try { - Table dumpTable = db.newTable(tableName); - databaseName = dumpTable.getDbName(); - if (null == db.getDatabase(dumpTable.getDbName())) { - throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(dumpTable.getDbName())); - } - if (null != db.getTable(dumpTable.getDbName(), dumpTable.getTableName(), false)) { - throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(tableName)); - } - } catch (HiveException e) { - throw new SemanticException(e); + Table dumpTable = db.newTable(tableName); + String databaseName = dumpTable.getDbName(); + if (null == db.getDatabase(dumpTable.getDbName())) { + throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(dumpTable.getDbName())); + } + if (null != db.getTable(dumpTable.getDbName(), dumpTable.getTableName(), false)) { + throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(tableName)); } tblProps = addDefaultProperties(tblProps); @@ -9637,7 +9608,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) } private ASTNode analyzeCreateView(ASTNode ast, QB qb) - throws SemanticException { + throws HiveException { String tableName = getUnescapedName((ASTNode) ast.getChild(0)); List cols = null; boolean ifNotExists = false; @@ -9704,7 +9675,7 @@ private ASTNode analyzeCreateView(ASTNode ast, QB qb) // the statement could be CREATE VIEW, REPLACE VIEW, or ALTER VIEW AS SELECT // check semantic conditions private void validateCreateView(CreateViewDesc createVwDesc) - throws SemanticException { + throws HiveException { try { Table oldView = getTableWithQN(createVwDesc.getViewName(), false); @@ -9752,7 +9723,7 @@ private void validateCreateView(CreateViewDesc createVwDesc) } // Process the position alias in GROUPBY and ORDERBY - private void processPositionAlias(ASTNode ast) throws SemanticException { + private void processPositionAlias(ASTNode ast) throws HiveException { if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_GROUPBY_ORDERBY_POSITION_ALIAS) == false) { return; @@ -9848,9 +9819,9 @@ private void processPositionAlias(ASTNode ast) throws SemanticException { * separate it from noscan command process so that it provides us flexibility * * @param tree - * @throws SemanticException + * @throws HiveException */ - protected void processPartialScanCommand (ASTNode tree) throws SemanticException { + protected void processPartialScanCommand (ASTNode tree) throws HiveException { // check if it is partial scan command this.checkPartialScan(tree); @@ -9863,9 +9834,9 @@ protected void processPartialScanCommand (ASTNode tree) throws SemanticException /** * process analyze ... 
noscan command * @param tree - * @throws SemanticException + * @throws HiveException */ - protected void processNoScanCommand (ASTNode tree) throws SemanticException { + protected void processNoScanCommand (ASTNode tree) throws HiveException { // check if it is noscan command checkNoScan(tree); @@ -9879,9 +9850,9 @@ protected void processNoScanCommand (ASTNode tree) throws SemanticException { * Validate noscan command * * @param tree - * @throws SemanticException + * @throws HiveException */ - private void validateAnalyzeNoscan(ASTNode tree) throws SemanticException { + private void validateAnalyzeNoscan(ASTNode tree) throws HiveException { // since it is noscan, it is true table name in command String tableName = getUnescapedName((ASTNode) tree.getChild(0).getChild(0)); Table tbl; @@ -9904,9 +9875,9 @@ private void validateAnalyzeNoscan(ASTNode tree) throws SemanticException { * Validate partialscan command * * @param tree - * @throws SemanticException + * @throws HiveException */ - private void validateAnalyzePartialscan(ASTNode tree) throws SemanticException { + private void validateAnalyzePartialscan(ASTNode tree) throws HiveException { // since it is partialscan, it is true table name in command String tableName = getUnescapedName((ASTNode) tree.getChild(0).getChild(0)); Table tbl; @@ -9995,7 +9966,7 @@ public void setQB(QB qb) { * - For a SubQuery: set the source to the alias returned by processSubQuery * - For a PTF invocation: recursively call processPTFChain. */ - private PTFInputSpec processPTFSource(QB qb, ASTNode inputNode) throws SemanticException{ + private PTFInputSpec processPTFSource(QB qb, ASTNode inputNode) throws HiveException{ PTFInputSpec qInSpec = null; int type = inputNode.getType(); @@ -10034,7 +10005,7 @@ private PTFInputSpec processPTFSource(QB qb, ASTNode inputNode) throws SemanticE * PTF invocation. */ private PartitionedTableFunctionSpec processPTFChain(QB qb, ASTNode ptf) - throws SemanticException{ + throws HiveException{ int child_count = ptf.getChildCount(); if (child_count < 2) { throw new SemanticException(generateErrorMessage(ptf, @@ -10098,7 +10069,7 @@ private PartitionedTableFunctionSpec processPTFChain(QB qb, ASTNode ptf) * ^(TOK_PTBLFUNCTION name partitionTableFunctionSource partitioningSpec? arguments*) * - setup a PTFInvocationSpec for this top level PTF invocation. 
*/ - private void processPTF(QB qb, ASTNode ptf) throws SemanticException{ + private void processPTF(QB qb, ASTNode ptf) throws HiveException{ PartitionedTableFunctionSpec ptfSpec = processPTFChain(qb, ptf); @@ -10112,7 +10083,7 @@ private void processPTF(QB qb, ASTNode ptf) throws SemanticException{ } private void handleQueryWindowClauses(QB qb, Phase1Ctx ctx_1, ASTNode node) - throws SemanticException { + throws HiveException { WindowingSpec spec = qb.getWindowingSpec(ctx_1.dest); for(int i=0; i < node.getChildCount(); i++) { processQueryWindowClause(spec, (ASTNode) node.getChild(i)); @@ -10175,7 +10146,7 @@ else if ( type == HiveParser.TOK_SORTBY || type == HiveParser.TOK_ORDERBY ) { } private WindowFunctionSpec processWindowFunction(ASTNode node, ASTNode wsNode) - throws SemanticException { + throws HiveException { WindowFunctionSpec wfSpec = new WindowFunctionSpec(); switch(node.getType()) { @@ -10234,7 +10205,7 @@ private boolean containsLeadLagUDF(ASTNode expressionTree) { } private void processQueryWindowClause(WindowingSpec spec, ASTNode node) - throws SemanticException { + throws HiveException { ASTNode nameNode = (ASTNode) node.getChild(0); ASTNode wsNode = (ASTNode) node.getChild(1); if(spec.getWindowSpecs() != null && spec.getWindowSpecs().containsKey(nameNode.getText())){ @@ -10246,7 +10217,7 @@ private void processQueryWindowClause(WindowingSpec spec, ASTNode node) spec.addWindowSpec(nameNode.getText(), ws); } - private WindowSpec processWindowSpec(ASTNode node) throws SemanticException { + private WindowSpec processWindowSpec(ASTNode node) throws HiveException { String sourceId = null; PartitionSpec partition = null; OrderSpec order = null; @@ -10296,7 +10267,7 @@ private WindowSpec processWindowSpec(ASTNode node) throws SemanticException { return ws; } - private WindowFrameSpec processWindowFrame(ASTNode node) throws SemanticException { + private WindowFrameSpec processWindowFrame(ASTNode node) throws HiveException { int type = node.getType(); BoundarySpec start = null, end = null; @@ -10313,7 +10284,7 @@ private WindowFrameSpec processWindowFrame(ASTNode node) throws SemanticExceptio return new WindowFrameSpec(start, end); } - private BoundarySpec processBoundary(int frameType, ASTNode node) throws SemanticException { + private BoundarySpec processBoundary(int frameType, ASTNode node) throws HiveException { BoundarySpec bs = frameType == HiveParser.TOK_WINDOWRANGE ? new RangeBoundarySpec() : new ValueBoundarySpec(); int type = node.getType(); @@ -10452,14 +10423,14 @@ private boolean isValidGroupBySelectList(QB currQB, String clause){ //--------------------------- PTF handling: PTFInvocationSpec to PTFDesc -------------------------- private PTFDesc translatePTFInvocationSpec(PTFInvocationSpec ptfQSpec, RowResolver inputRR) - throws SemanticException{ + throws HiveException{ PTFDesc ptfDesc = null; PTFTranslator translator = new PTFTranslator(); ptfDesc = translator.translate(ptfQSpec, this, conf, inputRR, unparseTranslator); return ptfDesc; } - Operator genPTFPlan(PTFInvocationSpec ptfQSpec, Operator input) throws SemanticException { + Operator genPTFPlan(PTFInvocationSpec ptfQSpec, Operator input) throws HiveException { ArrayList componentQueries = PTFTranslator.componentize(ptfQSpec); for (PTFInvocationSpec ptfSpec : componentQueries) { input = genPTFPlanForComponentQuery(ptfSpec, input); @@ -10476,7 +10447,7 @@ Operator genPTFPlan(PTFInvocationSpec ptfQSpec, Operator input) throws SemanticE * columns and order columns. 
Use the input definition to construct the list * of output columns for the ReduceSinkOperator * - * @throws SemanticException + * @throws HiveException */ void buildPTFReduceSinkDetails(PartitionedTableFunctionDef tabDef, RowResolver inputRR, @@ -10487,7 +10458,7 @@ void buildPTFReduceSinkDetails(PartitionedTableFunctionDef tabDef, List outputColumnNames, StringBuilder orderString, RowResolver rsOpRR, - RowResolver extractRR) throws SemanticException { + RowResolver extractRR) throws HiveException { List partColList = tabDef.getPartition().getExpressions(); @@ -10582,7 +10553,7 @@ void buildPTFReduceSinkDetails(PartitionedTableFunctionDef tabDef, } private Operator genPTFPlanForComponentQuery(PTFInvocationSpec ptfQSpec, Operator input) - throws SemanticException { + throws HiveException { /* * 1. Create the PTFDesc from the Qspec attached to this QB. */ @@ -10711,7 +10682,7 @@ private Operator genPTFPlanForComponentQuery(PTFInvocationSpec ptfQSpec, Operato //--------------------------- Windowing handling: PTFInvocationSpec to PTFDesc -------------------- - Operator genWindowingPlan(WindowingSpec wSpec, Operator input) throws SemanticException { + Operator genWindowingPlan(WindowingSpec wSpec, Operator input) throws HiveException { wSpec.validateAndMakeEffective(); WindowingComponentizer groups = new WindowingComponentizer(wSpec); RowResolver rr = opParseCtx.get(input).getRowResolver(); @@ -10734,7 +10705,7 @@ Operator genWindowingPlan(WindowingSpec wSpec, Operator input) throws SemanticEx private Operator genReduceSinkPlanForWindowing(WindowingSpec spec, RowResolver inputRR, - Operator input) throws SemanticException{ + Operator input) throws HiveException{ ArrayList partCols = new ArrayList(); ArrayList valueCols = new ArrayList(); ArrayList orderCols = new ArrayList(); @@ -10860,14 +10831,14 @@ private Operator genReduceSinkPlanForWindowing(WindowingSpec spec, public static ArrayList parseSelect(String selectExprStr) - throws SemanticException + throws HiveException { ASTNode selNode = null; try { ParseDriver pd = new ParseDriver(); selNode = pd.parseSelect(selectExprStr, null); } catch (ParseException pe) { - throw new SemanticException(pe); + throw HiveException.wrap(pe); } ArrayList selSpec = new ArrayList(); @@ -10934,7 +10905,7 @@ private void addAlternateGByKeyMappings(ASTNode gByExpr, ColumnInfo colInfo, try { ColumnInfo pColInfo = parentRR.get(tab_alias, col_alias); tab_alias = pColInfo == null ? 
null : pColInfo.getTabAlias(); - } catch(SemanticException se) { + } catch (HiveException se) { } gByRR.put(tab_alias, col_alias, colInfo); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java index 835a654..ce3c2e9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java @@ -21,6 +21,7 @@ import java.util.HashMap; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.session.SessionState; @@ -138,7 +139,7 @@ } public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree) - throws SemanticException { + throws HiveException { if (tree.getToken() == null) { throw new RuntimeException("Empty Syntax Tree"); } else { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java index 8ffbe07..6cc6e0a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java @@ -9,6 +9,7 @@ import org.apache.hadoop.hive.ql.exec.ColumnInfo; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.QBSubQuery.SubQueryType; import org.apache.hadoop.hive.ql.parse.QBSubQuery.SubQueryTypeDef; @@ -28,7 +29,7 @@ static void extractConjuncts(ASTNode node, List conjuncts) { * return the remaining WhereClause. */ static ASTNode rewriteParentQueryWhere(ASTNode whereCond, ASTNode subQuery) - throws SemanticException { + throws HiveException { ParentQueryWhereClauseRewrite rewrite = new ParentQueryWhereClauseRewrite(whereCond, subQuery); return rewrite.remove(); @@ -156,7 +157,7 @@ else if (gParent == null) { } } - ASTNode remove() throws SemanticException { + ASTNode remove() throws HiveException { boolean r = removeSubQuery(root); if (r) { return root; @@ -170,7 +171,7 @@ ASTNode remove() throws SemanticException { } static List findSubQueries(ASTNode node) - throws SemanticException { + throws HiveException { List subQueries = new ArrayList(); findSubQueries(node, subQueries); return subQueries; @@ -193,7 +194,7 @@ static QBSubQuery buildSubQuery(String outerQueryId, int sqIdx, ASTNode sqAST, ASTNode originalSQAST, - Context ctx) throws SemanticException { + Context ctx) throws HiveException { ASTNode sqOp = (ASTNode) sqAST.getChild(0); ASTNode sq = (ASTNode) sqAST.getChild(1); ASTNode outerQueryExpr = (ASTNode) sqAST.getChild(2); @@ -213,7 +214,7 @@ static QBSubQuery buildSubQuery(String outerQueryId, ctx); } - static SubQueryTypeDef buildSQOperator(ASTNode astSQOp) throws SemanticException { + static SubQueryTypeDef buildSQOperator(ASTNode astSQOp) throws HiveException { ASTNode opAST = (ASTNode) astSQOp.getChild(0); SubQueryType type = SubQueryType.get(opAST); return new SubQueryTypeDef(opAST, type); @@ -226,7 +227,7 @@ static SubQueryTypeDef buildSQOperator(ASTNode astSQOp) throws SemanticException * 1 if implies aggregation * 2 if implies windowing */ - static int checkAggOrWindowing(ASTNode expressionTree) throws SemanticException { + static int checkAggOrWindowing(ASTNode expressionTree) throws HiveException { int exprTokenType = expressionTree.getToken().getType(); if 
(exprTokenType == HiveParser.TOK_FUNCTION || exprTokenType == HiveParser.TOK_FUNCTIONDI diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java index d847a16..963678f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; @@ -71,7 +72,7 @@ public TableAccessAnalyzer(ParseContext pactx) { pGraphContext = pactx; } - public TableAccessInfo analyzeTableAccess() throws SemanticException { + public TableAccessInfo analyzeTableAccess() throws HiveException { // Set up the rules for the graph walker for group by and join operators Map opRules = new LinkedHashMap(); @@ -98,7 +99,7 @@ private NodeProcessor getDefaultProc() { return new NodeProcessor() { @Override public Object process(Node nd, Stack stack, - NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException { + NodeProcessorCtx procCtx, Object... nodeOutputs) throws HiveException { return null; } }; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index b569ed0..524bbfb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -78,7 +78,7 @@ public void init(HiveConf conf, LogHelper console, Hive db) { @SuppressWarnings({"nls", "unchecked"}) public void compile(final ParseContext pCtx, final List> rootTasks, - final HashSet inputs, final HashSet outputs) throws SemanticException { + final HashSet inputs, final HashSet outputs) throws HiveException { Context ctx = pCtx.getContext(); GlobalLimitCtx globalLimitCtx = pCtx.getGlobalLimitCtx(); @@ -174,10 +174,8 @@ public void compile(final ParseContext pCtx, final List task, * Called to transform tasks into local tasks where possible/desirable */ protected abstract void decideExecMode(List> rootTasks, Context ctx, - GlobalLimitCtx globalLimitCtx) throws SemanticException; + GlobalLimitCtx globalLimitCtx) throws HiveException; /* * Called at the beginning of the compile phase to have another chance to optimize the operator plan */ protected void optimizeOperatorPlan(ParseContext pCtxSet, Set inputs, - Set outputs) throws SemanticException { + Set outputs) throws HiveException { } /* * Called after the tasks have been generated to run another round of optimization */ protected abstract void optimizeTaskPlan(List> rootTasks, - ParseContext pCtx, Context ctx) throws SemanticException; + ParseContext pCtx, Context ctx) throws HiveException; /* * Called to set the appropriate input format for tasks @@ -358,7 +356,7 @@ protected abstract void optimizeTaskPlan(List> root * Called to generate the taks tree from the parse context/operator tree */ protected abstract void generateTaskTree(List> rootTasks, ParseContext pCtx, - List> mvTask, Set inputs, Set outputs) throws SemanticException; + List> mvTask, Set inputs, Set outputs) throws HiveException; /** * Create a clone of the parse context diff --git 
ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java index dff743f..39c101b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.ConvertJoinMapJoin; import org.apache.hadoop.hive.ql.optimizer.ReduceSinkMapJoinProc; import org.apache.hadoop.hive.ql.optimizer.SetReducerParallelism; @@ -75,7 +76,7 @@ public TezCompiler() { @Override protected void optimizeOperatorPlan(ParseContext pCtx, Set inputs, - Set outputs) throws SemanticException { + Set outputs) throws HiveException { // Sequence of TableScan operators to be walked Deque> deque = new LinkedList>(); @@ -107,7 +108,7 @@ protected void optimizeOperatorPlan(ParseContext pCtx, Set inputs, @Override protected void generateTaskTree(List> rootTasks, ParseContext pCtx, List> mvTask, Set inputs, Set outputs) - throws SemanticException { + throws HiveException { ParseContext tempParseContext = getParseContext(pCtx, rootTasks); GenTezWork genTezWork = new GenTezWork(); @@ -136,7 +137,7 @@ protected void generateTaskTree(List> rootTasks, Pa { @Override public Object process(Node n, Stack s, - NodeProcessorCtx procCtx, Object... os) throws SemanticException { + NodeProcessorCtx procCtx, Object... os) throws HiveException { throw new SemanticException("Unions not yet supported on Tez." +" Please use MR for this query"); } @@ -197,14 +198,14 @@ private void setInputFormat(MapWork work, Operator op) { @Override protected void decideExecMode(List> rootTasks, Context ctx, GlobalLimitCtx globalLimitCtx) - throws SemanticException { + throws HiveException { // currently all Tez work is on the cluster return; } @Override protected void optimizeTaskPlan(List> rootTasks, ParseContext pCtx, - Context ctx) throws SemanticException { + Context ctx) throws HiveException { PhysicalContext physicalCtx = new PhysicalContext(conf, pCtx, pCtx.getContext(), rootTasks, pCtx.getFetchTask()); if (conf.getBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED)) { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/TezWalker.java ql/src/java/org/apache/hadoop/hive/ql/parse/TezWalker.java index 2f63c1a..3f20db5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/TezWalker.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/TezWalker.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker; import org.apache.hadoop.hive.ql.lib.Dispatcher; import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.metadata.HiveException; /** * Walks the operator tree in DFS fashion. 
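In the TezCompiler hunk above, the anonymous processor keeps throw new SemanticException("Unions not yet supported on Tez." + " Please use MR for this query") even though its signature now reads throws HiveException; that compiles because the narrower subclass may still be thrown under the widened clause. A short sketch of the same shape, assuming (as the patch implies but this excerpt does not show) that NodeProcessor.process has itself been migrated to throws HiveException; the class name is invented for illustration:

import java.util.Stack;

import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.parse.SemanticException;

class UnionNotSupportedSketch implements NodeProcessor {
  @Override
  public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
      Object... nodeOutputs) throws HiveException {
    // Throwing the narrower SemanticException under a throws HiveException
    // clause is valid Java; callers that previously caught SemanticException
    // simply catch HiveException (or the subclass) after the migration.
    throw new SemanticException("Unions not yet supported on Tez."
        + " Please use MR for this query");
  }
}

The same reasoning covers the later hunks (TezWalker, TypeCheckProcFactory, the authorization task factory, and the plan descriptors), where only the declared exception type changes and the method bodies stay as they were.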
@@ -46,7 +47,7 @@ public TezWalker(Dispatcher disp) { * operator being walked */ @Override - public void walk(Node nd) throws SemanticException { + public void walk(Node nd) throws HiveException { List children = nd.getChildren(); // maintain the stack of operators encountered diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java index 9a947ec..1f9c66b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java @@ -49,6 +49,7 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnListDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; @@ -105,7 +106,7 @@ private TypeCheckProcFactory() { * @return exprNodeColumnDesc. */ public static ExprNodeDesc processGByExpr(Node nd, Object procCtx) - throws SemanticException { + throws HiveException { // We recursively create the exprNodeDesc. Base cases: when we encounter // a column ref, we convert that into an exprNodeColumnDesc; when we // encounter @@ -138,7 +139,7 @@ public static ExprNodeDesc processGByExpr(Node nd, Object procCtx) } public static Map genExprNode(ASTNode expr, - TypeCheckCtx tcCtx) throws SemanticException { + TypeCheckCtx tcCtx) throws HiveException { // Create the walker, the rules dispatcher and the context. // create a walker which walks the tree in a DFS manner while maintaining // the operator stack. The dispatcher @@ -207,7 +208,7 @@ public static ExprNodeDesc processGByExpr(Node nd, Object procCtx) @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { TypeCheckCtx ctx = (TypeCheckCtx) procCtx; if (ctx.getError() != null) { @@ -240,7 +241,7 @@ public static NullExprProcessor getNullExprProcessor() { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { TypeCheckCtx ctx = (TypeCheckCtx) procCtx; if (ctx.getError() != null) { @@ -315,7 +316,7 @@ public static NumExprProcessor getNumExprProcessor() { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { TypeCheckCtx ctx = (TypeCheckCtx) procCtx; if (ctx.getError() != null) { @@ -373,7 +374,7 @@ public static StrExprProcessor getStrExprProcessor() { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { TypeCheckCtx ctx = (TypeCheckCtx) procCtx; if (ctx.getError() != null) { @@ -419,7 +420,7 @@ public static BoolExprProcessor getBoolExprProcessor() { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { TypeCheckCtx ctx = (TypeCheckCtx) procCtx; if (ctx.getError() != null) { @@ -460,7 +461,7 @@ public static DateExprProcessor getDateExprProcessor() { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { TypeCheckCtx ctx = (TypeCheckCtx) procCtx; if (ctx.getError() != null) { @@ -702,7 +703,7 @@ public static ExprNodeDesc getFuncExprNodeDesc(String udfName, static ExprNodeDesc getXpathOrFuncExprNodeDesc(ASTNode expr, boolean isFunction, ArrayList children, TypeCheckCtx ctx) - throws SemanticException, UDFArgumentException { + throws HiveException, UDFArgumentException { // return the child directly if the conversion is redundant. if (isRedundantConversionFunction(expr, isFunction, children)) { assert (children.size() == 1); @@ -965,7 +966,7 @@ private boolean isDescendant(Node ans, Node des) { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { TypeCheckCtx ctx = (TypeCheckCtx) procCtx; @@ -1156,7 +1157,7 @@ public static DefaultExprProcessor getDefaultExprProcessor() { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { TypeCheckCtx ctx = (TypeCheckCtx) procCtx; if (ctx.getError() != null) { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingComponentizer.java ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingComponentizer.java index cf5ad60..1d1b6a8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingComponentizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingComponentizer.java @@ -23,6 +23,7 @@ import java.util.Map; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitioningSpec; import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowExpressionSpec; import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowFunctionSpec; @@ -46,14 +47,14 @@ WindowingSpec originalSpec; LinkedHashMap groups; - public WindowingComponentizer(WindowingSpec originalSpec) throws SemanticException { + public WindowingComponentizer(WindowingSpec originalSpec) throws HiveException { super(); this.originalSpec = originalSpec; groups = new LinkedHashMap(); groupFunctions(); } - private void groupFunctions() throws SemanticException { + private void groupFunctions() throws HiveException { for (WindowExpressionSpec expr : originalSpec.getWindowExpressions()) { WindowFunctionSpec wFn = (WindowFunctionSpec) expr; PartitioningSpec wFnGrp = wFn.getWindowSpec().getPartitioning(); @@ -73,7 +74,7 @@ public boolean hasNext() { public WindowingSpec next(HiveConf hCfg, SemanticAnalyzer semAly, UnparseTranslator unparseT, - RowResolver inputRR) throws SemanticException { + RowResolver inputRR) throws HiveException { SemanticException originalException = null; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java index 28afc6b..4c2c455 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java @@ -24,6 +24,7 @@ import org.antlr.runtime.CommonToken; import 
org.apache.hadoop.hive.ql.exec.FunctionRegistry; import org.apache.hadoop.hive.ql.exec.WindowFunctionInfo; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderSpec; import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitionExpression; import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitionSpec; @@ -128,7 +129,7 @@ public OrderSpec getQueryOrderSpec() { * - Validate the effective Window Frames with the rules in {@link validateWindowFrame} * - If there is no Order, then add the Partition expressions as the Order. */ - public void validateAndMakeEffective() throws SemanticException { + public void validateAndMakeEffective() throws HiveException { for(WindowExpressionSpec expr : getWindowExpressions()) { WindowFunctionSpec wFn = (WindowFunctionSpec) expr; WindowSpec wdwSpec = wFn.getWindowSpec(); @@ -160,7 +161,7 @@ public void validateAndMakeEffective() throws SemanticException { } private void fillInWindowSpec(String sourceId, WindowSpec dest, ArrayList visited) - throws SemanticException + throws HiveException { if (sourceId != null) { @@ -262,7 +263,7 @@ else if ( wFrame.getEnd() == null ) { } } - private void validateWindowFrame(WindowSpec wdwSpec) throws SemanticException { + private void validateWindowFrame(WindowSpec wdwSpec) throws HiveException { WindowFrameSpec wFrame = wdwSpec.getWindowFrame(); BoundarySpec start = wFrame.getStart(); BoundarySpec end = wFrame.getEnd(); @@ -281,7 +282,7 @@ private void validateWindowFrame(WindowSpec wdwSpec) throws SemanticException { validateValueBoundary(wFrame.getEnd(), wdwSpec.getOrder()); } - private void validateValueBoundary(BoundarySpec bs, OrderSpec order) throws SemanticException { + private void validateValueBoundary(BoundarySpec bs, OrderSpec order) throws HiveException { if ( bs instanceof ValueBoundarySpec ) { ValueBoundarySpec vbs = (ValueBoundarySpec) bs; if ( order != null ) { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactory.java index 1416c2e..5ff3250 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactory.java @@ -24,8 +24,8 @@ import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ASTNode; -import org.apache.hadoop.hive.ql.parse.SemanticException; /** * HiveAuthorizationTaskFactory creates DDL authorization related @@ -34,26 +34,26 @@ */ public interface HiveAuthorizationTaskFactory { public Task createCreateRoleTask(ASTNode node, HashSet inputs, - HashSet outputs) throws SemanticException; + HashSet outputs) throws HiveException; public Task createDropRoleTask(ASTNode node, HashSet inputs, - HashSet outputs) throws SemanticException; + HashSet outputs) throws HiveException; public Task createShowRoleGrantTask(ASTNode node, Path resultFile, - HashSet inputs, HashSet outputs) throws SemanticException; + HashSet inputs, HashSet outputs) throws HiveException; public Task createGrantRoleTask(ASTNode node, HashSet inputs, - HashSet outputs) throws SemanticException; + HashSet outputs) throws HiveException; public Task createRevokeRoleTask(ASTNode node, HashSet inputs, - HashSet outputs) 
throws SemanticException; + HashSet outputs) throws HiveException; public Task createGrantTask(ASTNode node, HashSet inputs, - HashSet outputs) throws SemanticException; + HashSet outputs) throws HiveException; public Task createShowGrantTask(ASTNode node, Path resultFile, HashSet inputs, - HashSet outputs) throws SemanticException; + HashSet outputs) throws HiveException; public Task createRevokeTask(ASTNode node, HashSet inputs, - HashSet outputs) throws SemanticException; + HashSet outputs) throws HiveException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java index c41cd0f..b50a6cf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java @@ -104,7 +104,7 @@ public HiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) { } @Override public Task createGrantTask(ASTNode ast, HashSet inputs, - HashSet outputs) throws SemanticException { + HashSet outputs) throws HiveException { List privilegeDesc = analyzePrivilegeListDef( (ASTNode) ast.getChild(0)); List principalDesc = analyzePrincipalListDef( @@ -135,7 +135,7 @@ public HiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) { } @Override public Task createRevokeTask(ASTNode ast, HashSet inputs, - HashSet outputs) throws SemanticException { + HashSet outputs) throws HiveException { List privilegeDesc = analyzePrivilegeListDef((ASTNode) ast.getChild(0)); List principalDesc = analyzePrincipalListDef((ASTNode) ast.getChild(1)); PrivilegeObjectDesc hiveObj = null; @@ -153,7 +153,7 @@ public HiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) { } @Override public Task createShowGrantTask(ASTNode ast, Path resultFile, HashSet inputs, - HashSet outputs) throws SemanticException { + HashSet outputs) throws HiveException { PrivilegeObjectDesc privHiveObj = null; ASTNode principal = (ASTNode) ast.getChild(0); @@ -242,7 +242,7 @@ public HiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) { private PrivilegeObjectDesc analyzePrivilegeObject(ASTNode ast, HashSet outputs) - throws SemanticException { + throws HiveException { PrivilegeObjectDesc subject = new PrivilegeObjectDesc(); //set object identifier @@ -295,7 +295,7 @@ private PrivilegeObjectDesc analyzePrivilegeObject(ASTNode ast, } private List analyzePrivilegeListDef(ASTNode node) - throws SemanticException { + throws HiveException { List ret = new ArrayList(); for (int i = 0; i < node.getChildCount(); i++) { ASTNode privilegeDef = (ASTNode) node.getChild(i); @@ -316,7 +316,7 @@ private PrivilegeObjectDesc analyzePrivilegeObject(ASTNode ast, } private Table getTable(String database, String tblName) - throws SemanticException { + throws HiveException { try { Table tab = database == null ? 
db.getTable(tblName, false) : db.getTable(database, tblName, false); @@ -333,7 +333,7 @@ private Table getTable(String database, String tblName) } private Partition getPartition(Table table, Map partSpec) - throws SemanticException { + throws HiveException { try { Partition partition = db.getPartition(table, partSpec, false); if (partition == null) { diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java index f6a3b43..e14ff47 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java @@ -27,9 +27,9 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.ParseUtils; -import org.apache.hadoop.hive.ql.parse.SemanticException; /** * AlterTableDesc. @@ -649,9 +649,9 @@ public void setSkewedColValues(List> skewedColValues) { /** * Validate alter table description. * - * @throws SemanticException + * @throws HiveException */ - public void validate() throws SemanticException { + public void validate() throws HiveException { if (null != table) { /* Validate skewed information. */ ValidationUtility.validateSkewedInformation( diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java index d1b729c..44ddfc5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.ParseUtils; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -396,7 +397,7 @@ public void setSkewedColValues(ArrayList> skewedColValues) { } public void validate() - throws SemanticException { + throws HiveException { if ((this.getCols() == null) || (this.getCols().size() == 0)) { // for now make sure that serde exists diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java index 96c8d89..f0cb11b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.ql.exec.FunctionRegistry; import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; @@ -179,7 +180,7 @@ public static boolean isDeterministic(ExprNodeDesc desc) { * is an ancestor of current or null (back to top operator). 
*/ public static ArrayList backtrack(List sources, - Operator current, Operator terminal) throws SemanticException { + Operator current, Operator terminal) throws HiveException { ArrayList result = new ArrayList(); for (ExprNodeDesc expr : sources) { result.add(backtrack(expr, current, terminal)); @@ -188,7 +189,7 @@ public static boolean isDeterministic(ExprNodeDesc desc) { } public static ExprNodeDesc backtrack(ExprNodeDesc source, Operator current, - Operator terminal) throws SemanticException { + Operator terminal) throws HiveException { Operator parent = getSingleParent(current, terminal); if (parent == null) { return source; @@ -215,7 +216,7 @@ public static ExprNodeDesc backtrack(ExprNodeDesc source, Operator current, // Resolve column expression to input expression by using expression mapping in current operator private static ExprNodeDesc backtrack(ExprNodeColumnDesc column, Operator current, - Operator terminal) throws SemanticException { + Operator terminal) throws HiveException { Map mapping = current.getColumnExprMap(); if (mapping == null || !mapping.containsKey(column.getColumn())) { return backtrack((ExprNodeDesc)column, current, terminal); @@ -225,7 +226,7 @@ private static ExprNodeDesc backtrack(ExprNodeColumnDesc column, Operator cur } public static Operator getSingleParent(Operator current, Operator terminal) - throws SemanticException { + throws HiveException { if (current == terminal) { return null; } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDeserializer.java ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDeserializer.java index 3a258e4..62b9a93 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDeserializer.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDeserializer.java @@ -265,7 +265,7 @@ protected void initialize(ShapeDetails shp, StructObjectInspector OI) throws Hiv StructObjectInspector outOI = PTFPartition.setupPartitionOutputOI(serDe, OI); shp.setOI(outOI); } catch (Exception se) { - throw new HiveException(se); + throw HiveException.wrap(se); } } @@ -287,7 +287,7 @@ private TableFunctionResolver constructResolver(String className) throws HiveExc Class.forName(className); return (TableFunctionResolver) ReflectionUtils.newInstance(rCls, null); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java index 6ee6bee..50a37ee 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java @@ -682,7 +682,7 @@ public static ReduceSinkDesc getReduceSinkDesc( public static ReduceSinkDesc getReduceSinkDesc( ArrayList keyCols, ArrayList valueCols, List outputColumnNames, boolean includeKey, int tag, - int numPartitionFields, int numReducers) throws SemanticException { + int numPartitionFields, int numReducers) throws HiveException { return getReduceSinkDesc(keyCols, keyCols.size(), valueCols, new ArrayList>(), includeKey ? 
outputColumnNames.subList(0, keyCols.size()) : @@ -724,7 +724,7 @@ public static ReduceSinkDesc getReduceSinkDesc( List> distinctColIndices, List outputKeyColumnNames, List outputValueColumnNames, boolean includeKey, int tag, - int numPartitionFields, int numReducers) throws SemanticException { + int numPartitionFields, int numReducers) throws HiveException { ArrayList partitionCols = null; if (numPartitionFields >= keyCols.size()) { diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ValidationUtility.java ql/src/java/org/apache/hadoop/hive/ql/plan/ValidationUtility.java index 09909ac..6cba915 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ValidationUtility.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ValidationUtility.java @@ -23,6 +23,7 @@ import java.util.Set; import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.SemanticException; /** @@ -45,10 +46,10 @@ private ValidationUtility () { * @param colNames column names * @param skewedColNames skewed column names * @param skewedColValues skewed column values - * @throws SemanticException + * @throws HiveException */ public static void validateSkewedInformation(List colNames, List skewedColNames, - List> skewedColValues) throws SemanticException { + List> skewedColValues) throws HiveException { if (skewedColNames.size() > 0) { /** * all columns in skewed column name are valid columns @@ -86,10 +87,10 @@ public static void validateSkewedInformation(List colNames, List * * @param skewedColNames * @param skewedColValues - * @throws SemanticException + * @throws HiveException */ public static void validateSkewedColNameValueNumberMatch(List skewedColNames, - List> skewedColValues) throws SemanticException { + List> skewedColValues) throws HiveException { for (List colValue : skewedColValues) { if (colValue.size() != skewedColNames.size()) { throw new SemanticException( @@ -105,10 +106,10 @@ public static void validateSkewedColNameValueNumberMatch(List skewedColN * * @param colNames * @param skewedColNames - * @throws SemanticException + * @throws HiveException */ public static void validateSkewedColNames(List colNames, List skewedColNames) - throws SemanticException { + throws HiveException { // make a copy List copySkewedColNames = new ArrayList(skewedColNames); // remove valid columns @@ -128,10 +129,10 @@ public static void validateSkewedColNames(List colNames, List sk * Find out duplicate name. 
* * @param names - * @throws SemanticException + * @throws HiveException */ public static void validateSkewedColumnNameUniqueness(List names) - throws SemanticException { + throws HiveException { Set lookup = new HashSet(); for (String name : names) { if (lookup.contains(name)) { diff --git ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java index 4175d11..9a218d5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java @@ -37,8 +37,8 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.RowResolver; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc; @@ -66,7 +66,7 @@ */ @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { ExprWalkerInfo ctx = (ExprWalkerInfo) procCtx; ExprNodeColumnDesc colref = (ExprNodeColumnDesc) nd; RowResolver toRR = ctx.getToRR(); @@ -111,7 +111,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { ExprWalkerInfo ctx = (ExprWalkerInfo) procCtx; String alias = null; ExprNodeFieldDesc expr = (ExprNodeFieldDesc) nd; @@ -154,7 +154,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { ExprWalkerInfo ctx = (ExprWalkerInfo) procCtx; String alias = null; ExprNodeGenericFuncDesc expr = (ExprNodeGenericFuncDesc) nd; @@ -206,7 +206,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { ExprWalkerInfo ctx = (ExprWalkerInfo) procCtx; ctx.setIsCandidate((ExprNodeDesc) nd, true); return true; @@ -231,7 +231,7 @@ private static NodeProcessor getFieldProcessor() { public static ExprWalkerInfo extractPushdownPreds(OpWalkerInfo opContext, Operator op, ExprNodeDesc pred) - throws SemanticException { + throws HiveException { List preds = new ArrayList(); preds.add(pred); return extractPushdownPreds(opContext, op, preds); @@ -246,11 +246,11 @@ public static ExprWalkerInfo extractPushdownPreds(OpWalkerInfo opContext, * operator of the predicates being processed * @param preds * @return The expression walker information - * @throws SemanticException + * @throws HiveException */ public static ExprWalkerInfo extractPushdownPreds(OpWalkerInfo opContext, Operator op, List preds) - throws SemanticException { + throws HiveException { // Create the walker, the rules dispatcher and the context. 
ExprWalkerInfo exprContext = new ExprWalkerInfo(op, opContext .getRowResolver(op)); diff --git ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java index 40298e1..8bf3d6f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler; import org.apache.hadoop.hive.ql.metadata.HiveStoragePredicateHandler; import org.apache.hadoop.hive.ql.metadata.Table; @@ -51,7 +52,6 @@ import org.apache.hadoop.hive.ql.parse.HiveParser; import org.apache.hadoop.hive.ql.parse.OpParseContext; import org.apache.hadoop.hive.ql.parse.RowResolver; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils; @@ -88,7 +88,7 @@ @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { LOG.info("Processing for " + nd.getName() + "(" + ((Operator) nd).getIdentifier() + ")"); // script operator is a black-box to hive so no optimization here @@ -109,7 +109,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, public static class UDTFPPD extends DefaultPPD implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { LOG.info("Processing for " + nd.getName() + "(" + ((Operator) nd).getIdentifier() + ")"); //Predicates for UDTF wont be candidates for its children. So, nothing to @@ -123,7 +123,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { LOG.info("Processing for " + nd.getName() + "(" + ((Operator) nd).getIdentifier() + ")"); OpWalkerInfo owi = (OpWalkerInfo) procCtx; @@ -150,7 +150,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { LOG.info("Processing for " + nd.getName() + "(" + ((Operator) nd).getIdentifier() + ")"); OpWalkerInfo owi = (OpWalkerInfo) procCtx; @@ -170,7 +170,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { LOG.info("Processing for " + nd.getName() + "(" + ((Operator) nd).getIdentifier() + ")"); OpWalkerInfo owi = (OpWalkerInfo) procCtx; @@ -223,7 +223,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, public static class JoinPPD extends DefaultPPD implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... 
nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { LOG.info("Processing for " + nd.getName() + "(" + ((Operator) nd).getIdentifier() + ")"); OpWalkerInfo owi = (OpWalkerInfo) procCtx; @@ -294,7 +294,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, * like this is ok. */ private void applyFilterTransitivity(JoinOperator nd, OpWalkerInfo owi) - throws SemanticException { + throws HiveException { ExprWalkerInfo prunePreds = owi.getPrunedPreds((Operator) nd); if (prunePreds != null) { @@ -399,7 +399,7 @@ private void applyFilterTransitivity(JoinOperator nd, OpWalkerInfo owi) * representing "table.column" or null if the ASTNode is not in that form */ private ColumnInfo getColumnInfoFromAST(ASTNode nd, - Map aliastoRR) throws SemanticException { + Map aliastoRR) throws HiveException { // this bit is messy since we are parsing an ASTNode at this point if (nd.getType()==HiveParser.DOT) { if (nd.getChildCount()==2) { @@ -501,7 +501,7 @@ private void replaceColumnReference(ExprNodeDesc expr, public static class ReduceSinkPPD extends DefaultPPD implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { LOG.info("Processing for " + nd.getName() + "(" + ((Operator) nd).getIdentifier() + ")"); ReduceSinkOperator rs = (ReduceSinkOperator) nd; @@ -542,7 +542,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { LOG.info("Processing for " + nd.getName() + "(" + ((Operator) nd).getIdentifier() + ")"); OpWalkerInfo owi = (OpWalkerInfo) procCtx; @@ -586,11 +586,11 @@ protected void logExpr(Node nd, ExprWalkerInfo ewi) { * aliases that this operator can pushdown. null means that all * aliases can be pushed down * @param ignoreAliases - * @throws SemanticException + * @throws HiveException */ protected boolean mergeWithChildrenPred(Node nd, OpWalkerInfo owi, ExprWalkerInfo ewi, Set aliases, boolean ignoreAliases) - throws SemanticException { + throws HiveException { boolean hasUnpushedPredicates = false; Operator current = (Operator) nd; List> children = current.getChildOperators(); @@ -648,7 +648,7 @@ private void removeCandidates(Operator operator, OpWalkerInfo owi) { protected ExprWalkerInfo mergeChildrenPred(Node nd, OpWalkerInfo owi, Set excludedAliases, boolean ignoreAliases) - throws SemanticException { + throws HiveException { if (nd.getChildren() == null) { return null; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java index cd5ae51..7c251cc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java +++ ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java @@ -38,9 +38,9 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.Transform; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; /** * Implements predicate pushdown. 
Predicate pushdown is a term borrowed from @@ -80,7 +80,7 @@ private ParseContext pGraphContext; @Override - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { pGraphContext = pctx; // create a the context for walking operators diff --git ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicateTransitivePropagate.java ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicateTransitivePropagate.java index 1476e1a..2dfa980 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicateTransitivePropagate.java +++ ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicateTransitivePropagate.java @@ -44,11 +44,11 @@ import org.apache.hadoop.hive.ql.lib.PreOrderWalker; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.Transform; import org.apache.hadoop.hive.ql.parse.OpParseContext; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.RowResolver; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils; import org.apache.hadoop.hive.ql.plan.FilterDesc; @@ -64,7 +64,7 @@ private ParseContext pGraphContext; @Override - public ParseContext transform(ParseContext pctx) throws SemanticException { + public ParseContext transform(ParseContext pctx) throws HiveException { pGraphContext = pctx; Map opRules = new LinkedHashMap(); @@ -143,7 +143,7 @@ public TransitiveContext() { private static class JoinTransitive implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... nodeOutputs) throws HiveException { @SuppressWarnings("unchecked") CommonJoinOperator join = (CommonJoinOperator) nd; ReduceSinkOperator source = (ReduceSinkOperator) stack.get(stack.size() - 2); diff --git ql/src/java/org/apache/hadoop/hive/ql/tools/LineageInfo.java ql/src/java/org/apache/hadoop/hive/ql/tools/LineageInfo.java index 12154c9..45b61dc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/tools/LineageInfo.java +++ ql/src/java/org/apache/hadoop/hive/ql/tools/LineageInfo.java @@ -33,12 +33,12 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.ASTNode; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.HiveParser; import org.apache.hadoop.hive.ql.parse.ParseDriver; import org.apache.hadoop.hive.ql.parse.ParseException; -import org.apache.hadoop.hive.ql.parse.SemanticException; /** * @@ -77,7 +77,7 @@ * Implements the process method for the NodeProcessor interface. */ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... nodeOutputs) throws SemanticException { + Object... 
nodeOutputs) throws HiveException { ASTNode pt = (ASTNode) nd; switch (pt.getToken().getType()) { @@ -104,7 +104,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, * @throws ParseException */ public void getLineageInfo(String query) throws ParseException, - SemanticException { + HiveException { /* * Get the AST tree @@ -139,7 +139,7 @@ public void getLineageInfo(String query) throws ParseException, } public static void main(String[] args) throws IOException, ParseException, - SemanticException { + HiveException { String query = args[0]; diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/AbstractGenericUDAFResolver.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/AbstractGenericUDAFResolver.java index 4d4e61d..a503fd6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/AbstractGenericUDAFResolver.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/AbstractGenericUDAFResolver.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.udf.generic; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; @@ -37,7 +38,7 @@ @SuppressWarnings("deprecation") @Override public GenericUDAFEvaluator getEvaluator(GenericUDAFParameterInfo info) - throws SemanticException { + throws HiveException { if (info.isAllColumns()) { throw new SemanticException( @@ -49,7 +50,7 @@ public GenericUDAFEvaluator getEvaluator(GenericUDAFParameterInfo info) @Override public GenericUDAFEvaluator getEvaluator(TypeInfo[] info) - throws SemanticException { + throws HiveException { throw new SemanticException( "This UDAF does not support the deprecated getEvaluator() method."); } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java index 1a00800..1883282 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java @@ -58,7 +58,7 @@ @Override public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) - throws SemanticException { + throws HiveException { if (parameters.length != 1) { throw new UDFArgumentTypeException(parameters.length - 1, "Exactly one argument is expected."); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFBridge.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFBridge.java index 3c143f5..c18a2a6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFBridge.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFBridge.java @@ -51,7 +51,7 @@ public GenericUDAFBridge(UDAF udaf) { } @Override - public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException { + public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws HiveException { Class udafEvaluatorClass = udaf.getResolver() .getEvaluatorClass(Arrays.asList(parameters)); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectList.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectList.java index 536c4a7..875430f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectList.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectList.java @@ -22,7 +22,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.Description; import 
org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFMkCollectionEvaluator.BufferType; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; @@ -37,7 +37,7 @@ public GenericUDAFCollectList() { @Override public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) - throws SemanticException { + throws HiveException { if (parameters.length != 1) { throw new UDFArgumentTypeException(parameters.length - 1, "Exactly one argument is expected."); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectSet.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectSet.java index 6dc424a..3d87f7b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectSet.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectSet.java @@ -21,7 +21,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFMkCollectionEvaluator.BufferType; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; @@ -39,7 +39,7 @@ public GenericUDAFCollectSet() { @Override public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) - throws SemanticException { + throws HiveException { if (parameters.length != 1) { throw new UDFArgumentTypeException(parameters.length - 1, "Exactly one argument is expected."); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFComputeStats.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFComputeStats.java index 7348478..27972bf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFComputeStats.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFComputeStats.java @@ -59,7 +59,7 @@ @Override public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) - throws SemanticException { + throws HiveException { if (parameters.length != 2 ) { throw new UDFArgumentTypeException(parameters.length - 1, "Exactly two arguments are expected."); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFContextNGrams.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFContextNGrams.java index 17e9d76..92f4b51 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFContextNGrams.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFContextNGrams.java @@ -59,7 +59,7 @@ static final Log LOG = LogFactory.getLog(GenericUDAFContextNGrams.class.getName()); @Override - public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException { + public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws HiveException { if (parameters.length != 3 && parameters.length != 4) { throw new UDFArgumentTypeException(parameters.length-1, "Please specify either three or four arguments."); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java index 8056931..ea1a426 100644 --- 
ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java @@ -73,7 +73,7 @@ public class GenericUDAFCorrelation extends AbstractGenericUDAFResolver { @Override - public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException { + public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws HiveException { if (parameters.length != 2) { throw new UDFArgumentTypeException(parameters.length - 1, "Exactly two arguments are expected."); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java index 89bc1a7..5c8c390 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java @@ -48,14 +48,14 @@ @Override public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) - throws SemanticException { + throws HiveException { // This method implementation is preserved for backward compatibility. return new GenericUDAFCountEvaluator(); } @Override public GenericUDAFEvaluator getEvaluator(GenericUDAFParameterInfo paramInfo) - throws SemanticException { + throws HiveException { TypeInfo[] parameters = paramInfo.getParameters(); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java index f1017be..9eb9ac0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java @@ -67,7 +67,7 @@ static final Log LOG = LogFactory.getLog(GenericUDAFCovariance.class.getName()); @Override - public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException { + public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws HiveException { if (parameters.length != 2) { throw new UDFArgumentTypeException(parameters.length - 1, "Exactly two arguments are expected."); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovarianceSample.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovarianceSample.java index de0f153..6911b74 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovarianceSample.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovarianceSample.java @@ -41,7 +41,7 @@ @Override public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) - throws SemanticException { + throws HiveException { if (parameters.length != 2) { throw new UDFArgumentTypeException(parameters.length - 1, "Exactly two arguments are expected."); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEWAHBitmap.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEWAHBitmap.java index e4b412e..3917ee1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEWAHBitmap.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEWAHBitmap.java @@ -54,7 +54,7 @@ @Override public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) - throws SemanticException { + throws HiveException { if (parameters.length != 1) { throw new UDFArgumentTypeException(parameters.length - 1, "Exactly one argument is expected."); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFFirstValue.java 
ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFFirstValue.java index 748ce55..a9802a7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFFirstValue.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFFirstValue.java @@ -49,7 +49,7 @@ static final Log LOG = LogFactory.getLog(GenericUDAFFirstValue.class.getName()); @Override - public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException + public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws HiveException { if (parameters.length > 2) { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFHistogramNumeric.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFHistogramNumeric.java index f2e8e03..0514570 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFHistogramNumeric.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFHistogramNumeric.java @@ -61,7 +61,7 @@ static final Log LOG = LogFactory.getLog(GenericUDAFHistogramNumeric.class.getName()); @Override - public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException { + public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws HiveException { if (parameters.length != 2) { throw new UDFArgumentTypeException(parameters.length - 1, "Please specify exactly two arguments."); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLastValue.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLastValue.java index 138c152..00b6961 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLastValue.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLastValue.java @@ -42,7 +42,7 @@ @Override public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) - throws SemanticException + throws HiveException { if (parameters.length > 2) { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java index 295cd2e..8f3e74e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java @@ -42,7 +42,7 @@ @Override public GenericUDAFEvaluator getEvaluator(GenericUDAFParameterInfo parameters) - throws SemanticException { + throws HiveException { ObjectInspector[] paramOIs = parameters.getParameterObjectInspectors(); String fNm = functionName(); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFMax.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFMax.java index d6e9db4..a38726c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFMax.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFMax.java @@ -37,7 +37,7 @@ @Override public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) - throws SemanticException { + throws HiveException { if (parameters.length != 1) { throw new UDFArgumentTypeException(parameters.length - 1, "Exactly one argument is expected."); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFMin.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFMin.java index 3dc9900..8aea5f6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFMin.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFMin.java @@ -37,7 +37,7 @@ @Override public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) - 
throws SemanticException { + throws HiveException { if (parameters.length != 1) { throw new UDFArgumentTypeException(parameters.length - 1, "Exactly one argument is expected."); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFNTile.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFNTile.java index 18cde76..d0da96f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFNTile.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFNTile.java @@ -54,7 +54,7 @@ static final Log LOG = LogFactory.getLog(GenericUDAFNTile.class.getName()); @Override - public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException + public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws HiveException { if (parameters.length != 1) { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFPercentileApprox.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFPercentileApprox.java index 56e76be..78f6ea3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFPercentileApprox.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFPercentileApprox.java @@ -66,7 +66,7 @@ static final Log LOG = LogFactory.getLog(GenericUDAFPercentileApprox.class.getName()); @Override - public GenericUDAFEvaluator getEvaluator(GenericUDAFParameterInfo info) throws SemanticException { + public GenericUDAFEvaluator getEvaluator(GenericUDAFParameterInfo info) throws HiveException { ObjectInspector[] parameters = info.getParameterObjectInspectors(); if (parameters.length != 2 && parameters.length != 3) { throw new UDFArgumentTypeException(parameters.length - 1, diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRank.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRank.java index 5c8f1e0..0428b01 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRank.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRank.java @@ -53,7 +53,7 @@ static final Log LOG = LogFactory.getLog(GenericUDAFRank.class.getName()); @Override - public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException + public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws HiveException { if (parameters.length < 1) { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFResolver.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFResolver.java index 75f9ca0..ab6c345 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFResolver.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFResolver.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.ql.udf.generic; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; /** @@ -48,8 +48,8 @@ * @param parameters * The types of the parameters. We need the type information to know * which evaluator class to use. 
- * @throws SemanticException + * @throws HiveException */ GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) - throws SemanticException; + throws HiveException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFResolver2.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFResolver2.java index d66b29a..06969b1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFResolver2.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFResolver2.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.ql.udf.generic; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; /** * This interface extends the GenericUDAFResolver interface and @@ -55,8 +55,8 @@ * * @param info The parameter information that is applicable to the UDAF being * invoked. - * @throws SemanticException + * @throws HiveException */ GenericUDAFEvaluator getEvaluator( - GenericUDAFParameterInfo info) throws SemanticException; + GenericUDAFParameterInfo info) throws HiveException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRowNumber.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRowNumber.java index 987da3d..18ee09b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRowNumber.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRowNumber.java @@ -49,7 +49,7 @@ @Override public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) - throws SemanticException + throws HiveException { if (parameters.length != 0) { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStd.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStd.java index 159a2fe..42da424 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStd.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStd.java @@ -36,7 +36,7 @@ @Override public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) - throws SemanticException { + throws HiveException { if (parameters.length != 1) { throw new UDFArgumentTypeException(parameters.length - 1, "Exactly one argument is expected."); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStdSample.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStdSample.java index e85046c..15db589 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStdSample.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStdSample.java @@ -35,7 +35,7 @@ public class GenericUDAFStdSample extends GenericUDAFVariance { @Override - public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException { + public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws HiveException { if (parameters.length != 1) { throw new UDFArgumentTypeException(parameters.length - 1, "Exactly one argument is expected."); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFSum.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFSum.java index 8508ffb..8b06504 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFSum.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFSum.java @@ -50,7 +50,7 @@ @Override public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) - throws SemanticException { + throws HiveException { if (parameters.length != 1) { throw new UDFArgumentTypeException(parameters.length - 1, "Exactly one 
argument is expected."); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java index 3545390..ea2c6e6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java @@ -53,7 +53,7 @@ static final Log LOG = LogFactory.getLog(GenericUDAFVariance.class.getName()); @Override - public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException { + public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws HiveException { if (parameters.length != 1) { throw new UDFArgumentTypeException(parameters.length - 1, "Exactly one argument is expected."); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVarianceSample.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVarianceSample.java index ab863be..186cc48 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVarianceSample.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVarianceSample.java @@ -36,7 +36,7 @@ @Override public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) - throws SemanticException { + throws HiveException { if (parameters.length != 1) { throw new UDFArgumentTypeException(parameters.length - 1, "Exactly one argument is expected."); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFnGrams.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFnGrams.java index 4a7caab..75f3046 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFnGrams.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFnGrams.java @@ -57,7 +57,7 @@ static final Log LOG = LogFactory.getLog(GenericUDAFnGrams.class.getName()); @Override - public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException { + public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws HiveException { if (parameters.length != 3 && parameters.length != 4) { throw new UDFArgumentTypeException(parameters.length-1, "Please specify either three or four arguments."); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInFile.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInFile.java index ea52537..47b4606 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInFile.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInFile.java @@ -129,7 +129,7 @@ public void load(InputStream is) throws HiveException { set.add(line); } } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFReflect2.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFReflect2.java index 05e2163..503a907 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFReflect2.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFReflect2.java @@ -130,7 +130,7 @@ public Object evaluate(DeferredObject[] arguments) throws HiveException { } catch (InvocationTargetException e) { throw new HiveException(e.getCause()); } catch (Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } if (result == null) { return null; diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java index 9798aa6..799ef19 100644 --- 
ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java @@ -112,7 +112,7 @@ public void execute(PTFPartitionIterator pItr, PTFPartition outP) throws } } - static void throwErrorWithSignature(String message) throws SemanticException + static void throwErrorWithSignature(String message) throws HiveException { throw new SemanticException(String.format( "MatchPath signature is: SymbolPattern, one or more SymbolName, " + @@ -156,7 +156,7 @@ protected TableFunctionEvaluator createEvaluator(PTFDesc ptfDesc, * */ @Override - public void setupOutputOI() throws SemanticException + public void setupOutputOI() throws HiveException { MatchPath evaluator = (MatchPath) getEvaluator(); PartitionedTableFunctionDef tDef = evaluator.getTableDef(); @@ -184,12 +184,8 @@ public void setupOutputOI() throws SemanticException */ ResultExpressionParser resultExprParser = new ResultExpressionParser(evaluator.resultExprStr, selectListInputRR); - try { - resultExprParser.translate(); - } - catch(HiveException he) { - throw new SemanticException(he); - } + resultExprParser.translate(); + evaluator.resultExprInfo = resultExprParser.getResultExprInfo(); StructObjectInspector OI = evaluator.resultExprInfo.resultOI; @@ -199,7 +195,7 @@ public void setupOutputOI() throws SemanticException * validate and setup patternStr */ private void validateAndSetupPatternStr(MatchPath evaluator, - List args) throws SemanticException { + List args) throws HiveException { PTFExpressionDef symboPatternArg = args.get(0); ObjectInspector symbolPatternArgOI = symboPatternArg.getOI(); @@ -220,7 +216,7 @@ private void validateAndSetupPatternStr(MatchPath evaluator, */ private void validateAndSetupSymbolInfo(MatchPath evaluator, List args, - int argsNum) throws SemanticException { + int argsNum) throws HiveException { int symbolArgsSz = argsNum - 2; if ( symbolArgsSz % 2 != 0) { @@ -264,7 +260,7 @@ private void validateAndSetupSymbolInfo(MatchPath evaluator, */ private void validateAndSetupResultExprStr(MatchPath evaluator, List args, - int argsNum) throws SemanticException { + int argsNum) throws HiveException { PTFExpressionDef resultExprArg = args.get(argsNum - 1); ObjectInspector resultExprArgOI = resultExprArg.getOI(); @@ -283,7 +279,7 @@ private void validateAndSetupResultExprStr(MatchPath evaluator, /* * setup SymbolFunction chain. 
*/ - private void setupSymbolFunctionChain(MatchPath evaluator) throws SemanticException { + private void setupSymbolFunctionChain(MatchPath evaluator) throws HiveException { SymbolParser syP = new SymbolParser(evaluator.patternStr, evaluator.symInfo.symbolExprsNames, evaluator.symInfo.symbolExprsEvaluators, evaluator.symInfo.symbolExprsOIs); @@ -299,43 +295,38 @@ public boolean transformsRawInput() @Override public void initializeOutputOI() throws HiveException { - try { - MatchPath evaluator = (MatchPath) getEvaluator(); - PartitionedTableFunctionDef tDef = evaluator.getTableDef(); + MatchPath evaluator = (MatchPath) getEvaluator(); + PartitionedTableFunctionDef tDef = evaluator.getTableDef(); - List args = tDef.getArgs(); - int argsNum = args.size(); + List args = tDef.getArgs(); + int argsNum = args.size(); - validateAndSetupPatternStr(evaluator, args); - validateAndSetupSymbolInfo(evaluator, args, argsNum); - validateAndSetupResultExprStr(evaluator, args, argsNum); - setupSymbolFunctionChain(evaluator); + validateAndSetupPatternStr(evaluator, args); + validateAndSetupSymbolInfo(evaluator, args, argsNum); + validateAndSetupResultExprStr(evaluator, args, argsNum); + setupSymbolFunctionChain(evaluator); /* * setup OI for input to resultExpr select list */ - StructObjectInspector selectListInputOI = MatchPath.createSelectListOI( evaluator, - tDef.getInput()); - ResultExprInfo resultExprInfo = evaluator.resultExprInfo; - ArrayList selectListExprOIs = new ArrayList(); - resultExprInfo.resultExprEvals = new ArrayList(); - - for(int i=0 ; i < resultExprInfo.resultExprNodes.size(); i++) { - ExprNodeDesc selectColumnExprNode =resultExprInfo.resultExprNodes.get(i); - ExprNodeEvaluator selectColumnExprEval = - ExprNodeEvaluatorFactory.get(selectColumnExprNode); - ObjectInspector selectColumnOI = selectColumnExprEval.initialize(selectListInputOI); - resultExprInfo.resultExprEvals.add(selectColumnExprEval); - selectListExprOIs.add(selectColumnOI); - } + StructObjectInspector selectListInputOI = MatchPath.createSelectListOI( evaluator, + tDef.getInput()); + ResultExprInfo resultExprInfo = evaluator.resultExprInfo; + ArrayList selectListExprOIs = new ArrayList(); + resultExprInfo.resultExprEvals = new ArrayList(); - resultExprInfo.resultOI = ObjectInspectorFactory.getStandardStructObjectInspector( - resultExprInfo.resultExprNames, selectListExprOIs); - setOutputOI(resultExprInfo.resultOI); - } - catch(SemanticException se) { - throw new HiveException(se); + for(int i=0 ; i < resultExprInfo.resultExprNodes.size(); i++) { + ExprNodeDesc selectColumnExprNode =resultExprInfo.resultExprNodes.get(i); + ExprNodeEvaluator selectColumnExprEval = + ExprNodeEvaluatorFactory.get(selectColumnExprNode); + ObjectInspector selectColumnOI = selectColumnExprEval.initialize(selectListInputOI); + resultExprInfo.resultExprEvals.add(selectColumnExprEval); + selectListExprOIs.add(selectColumnOI); } + + resultExprInfo.resultOI = ObjectInspectorFactory.getStandardStructObjectInspector( + resultExprInfo.resultExprNames, selectListExprOIs); + setOutputOI(resultExprInfo.resultOI); } @Override @@ -632,7 +623,7 @@ public SymbolFunction getSymbolFunction() return symbolFnChain; } - public void parse() throws SemanticException + public void parse() throws HiveException { symbols = patternStr.split("\\."); symbolFunctions = new ArrayList(); @@ -695,7 +686,7 @@ public ResultExpressionParser(String resultExprString, this.selectListInputRowResolver = selectListInputRowResolver; } - public void translate() throws SemanticException, 
HiveException + public void translate() throws HiveException { setupSelectListInputInfo(); fixResultExprString(); @@ -708,7 +699,7 @@ public ResultExprInfo getResultExprInfo() { return resultExprInfo; } - private void buildSelectListEvaluators() throws SemanticException, HiveException + private void buildSelectListEvaluators() throws HiveException { resultExprInfo = new ResultExprInfo(); resultExprInfo.resultExprEvals = new ArrayList(); @@ -742,7 +733,7 @@ private void buildSelectListEvaluators() throws SemanticException, HiveException resultExprInfo.resultExprNames, selectListExprOIs); } - private void setupSelectListInputInfo() throws SemanticException + private void setupSelectListInputInfo() throws HiveException { selectListInputTypeCheckCtx = new TypeCheckCtx(selectListInputRowResolver); selectListInputTypeCheckCtx.setUnparseTranslator(null); @@ -764,12 +755,12 @@ private void fixResultExprString() resultExprString = r; } - private void parse() throws SemanticException + private void parse() throws HiveException { selectSpec = SemanticAnalyzer.parseSelect(resultExprString); } - private void validateSelectExpr() throws SemanticException + private void validateSelectExpr() throws HiveException { for (WindowExpressionSpec expr : selectSpec) { @@ -792,7 +783,7 @@ else if (exprNode instanceof ExprNodeColumnDesc) } public static ExprNodeDesc buildExprNode(ASTNode expr, - TypeCheckCtx typeCheckCtx) throws SemanticException + TypeCheckCtx typeCheckCtx) throws HiveException { // todo: use SemanticAnalyzer::genExprNodeDesc // currently SA not available to PTFTranslator. @@ -816,7 +807,7 @@ public static ExprNodeDesc buildExprNode(ASTNode expr, * add array to the list of columns */ protected static RowResolver createSelectListRR(MatchPath evaluator, - PTFInputDef inpDef) throws SemanticException { + PTFInputDef inpDef) throws HiveException { RowResolver rr = new RowResolver(); RowResolver inputRR = inpDef.getOutputShape().getRr(); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/Noop.java ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/Noop.java index fcf6afd..82cc8b8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/Noop.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/Noop.java @@ -48,7 +48,7 @@ protected TableFunctionEvaluator createEvaluator(PTFDesc ptfDesc, PartitionedTab } @Override - public void setupOutputOI() throws SemanticException { + public void setupOutputOI() throws HiveException { StructObjectInspector OI = getEvaluator().getTableDef().getInput().getOutputShape().getOI(); setOutputOI(OI); } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/NoopWithMap.java ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/NoopWithMap.java index 0b090a9..72a5731 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/NoopWithMap.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/NoopWithMap.java @@ -51,7 +51,7 @@ protected TableFunctionEvaluator createEvaluator(PTFDesc ptfDesc, PartitionedTab } @Override - public void setupOutputOI() throws SemanticException + public void setupOutputOI() throws HiveException { StructObjectInspector OI = getEvaluator().getTableDef().getInput().getOutputShape().getOI(); setOutputOI(OI); @@ -78,7 +78,7 @@ public boolean carryForwardNames() { } @Override - public void setupRawInputOI() throws SemanticException + public void setupRawInputOI() throws HiveException { StructObjectInspector OI = getEvaluator().getTableDef().getInput().getOutputShape().getOI(); setRawInputOI(OI); @@ -90,7 +90,7 @@ public void 
setupRawInputOI() throws SemanticException * Set to null only because carryForwardNames is true. */ @Override - public ArrayList getRawInputColumnNames() throws SemanticException { + public ArrayList getRawInputColumnNames() throws HiveException { return null; } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java index 969013c..02d56ac 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java @@ -64,7 +64,7 @@ * the transformsRawInput boolean. */ public void initialize(HiveConf cfg, PTFDesc ptfDesc, PartitionedTableFunctionDef tDef) - throws SemanticException { + throws HiveException { this.ptfDesc = ptfDesc; evaluator = createEvaluator(ptfDesc, tDef); @@ -95,13 +95,13 @@ public TableFunctionEvaluator getEvaluator() { * - subsequent to this call, a call to getOutputOI call on the {@link TableFunctionEvaluator} must return the OI * of the output of this function. */ - public abstract void setupOutputOI() throws SemanticException; + public abstract void setupOutputOI() throws HiveException; /* * A PTF Function must provide the 'external' names of the columns in its Output. * */ - public abstract List getOutputColumnNames() throws SemanticException; + public abstract List getOutputColumnNames() throws HiveException; /** @@ -123,7 +123,7 @@ public TableFunctionEvaluator getEvaluator() { * - subsequent to this call, a call to getRawInputOI call on the {@link TableFunctionEvaluator} must return the OI * of the output of this function. */ - public void setupRawInputOI() throws SemanticException { + public void setupRawInputOI() throws HiveException { if (!transformsRawInput()) { return; } @@ -135,7 +135,7 @@ public void setupRawInputOI() throws SemanticException { * A PTF Function must provide the 'external' names of the columns in the transformed Raw Input. 
* */ - public List getRawInputColumnNames() throws SemanticException { + public List getRawInputColumnNames() throws HiveException { if (!transformsRawInput()) { return null; } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java index 110ef27..73df63b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java @@ -145,7 +145,7 @@ protected TableFunctionEvaluator createEvaluator(PTFDesc ptfDesc, PartitionedTab } @Override - public void setupOutputOI() throws SemanticException { + public void setupOutputOI() throws HiveException { setOutputOI(wdwProcessingOutputOI); } diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java index 5991aae..ab6c665 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java @@ -38,8 +38,8 @@ import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat; import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; @@ -223,7 +223,7 @@ private void populateMapPlan2(Table src) { } @SuppressWarnings("unchecked") - private void populateMapRedPlan1(Table src) throws SemanticException { + private void populateMapRedPlan1(Table src) throws HiveException { ArrayList outputColumns = new ArrayList(); for (int i = 0; i < 2; i++) { @@ -253,7 +253,7 @@ private void populateMapRedPlan1(Table src) throws SemanticException { } @SuppressWarnings("unchecked") - private void populateMapRedPlan2(Table src) throws SemanticException { + private void populateMapRedPlan2(Table src) throws HiveException { ArrayList outputColumns = new ArrayList(); for (int i = 0; i < 2; i++) { outputColumns.add("_col" + i); @@ -288,7 +288,7 @@ private void populateMapRedPlan2(Table src) throws SemanticException { * test reduce with multiple tagged inputs. 
*/ @SuppressWarnings("unchecked") - private void populateMapRedPlan3(Table src, Table src2) throws SemanticException { + private void populateMapRedPlan3(Table src, Table src2) throws HiveException { List outputColumns = new ArrayList(); for (int i = 0; i < 2; i++) { outputColumns.add("_col" + i); @@ -330,7 +330,7 @@ private void populateMapRedPlan3(Table src, Table src2) throws SemanticException } @SuppressWarnings("unchecked") - private void populateMapRedPlan4(Table src) throws SemanticException { + private void populateMapRedPlan4(Table src) throws HiveException { // map-side work ArrayList outputColumns = new ArrayList(); @@ -375,7 +375,7 @@ public static ExprNodeColumnDesc getStringColumn(String columnName) { } @SuppressWarnings("unchecked") - private void populateMapRedPlan5(Table src) throws SemanticException { + private void populateMapRedPlan5(Table src) throws HiveException { // map-side work ArrayList outputColumns = new ArrayList(); @@ -409,7 +409,7 @@ private void populateMapRedPlan5(Table src) throws SemanticException { } @SuppressWarnings("unchecked") - private void populateMapRedPlan6(Table src) throws SemanticException { + private void populateMapRedPlan6(Table src) throws HiveException { // map-side work ArrayList outputColumns = new ArrayList(); diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java index a2b45f8..50cfaf8 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java @@ -1950,7 +1950,7 @@ public static Validator getValidator(String aggregate) throws HiveException { } } }catch(Exception e) { - throw new HiveException(e); + throw HiveException.wrap(e); } throw new HiveException("Missing validator for aggregate: " + aggregate); } diff --git ql/src/test/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/TestDynamicMultiDimeCollection.java ql/src/test/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/TestDynamicMultiDimeCollection.java index 6411a72..825b485 100644 --- ql/src/test/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/TestDynamicMultiDimeCollection.java +++ ql/src/test/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/TestDynamicMultiDimeCollection.java @@ -23,7 +23,7 @@ import junit.framework.TestCase; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.junit.Assert; import org.junit.Test; @@ -102,7 +102,7 @@ public void testUniqueElementsList3() { } @Test - public void testFlat3() throws SemanticException { + public void testFlat3() throws HiveException { List> uniqSkewedElements = new ArrayList>(); List v1 = Arrays.asList("1", "2", "default"); List v2 = Arrays.asList("a", "b", "c", "default"); @@ -116,7 +116,7 @@ public void testFlat3() throws SemanticException { } @Test - public void testFlat2() throws SemanticException { + public void testFlat2() throws HiveException { List> uniqSkewedElements = new ArrayList>(); List v1 = Arrays.asList("1", "2"); uniqSkewedElements.add(v1); @@ -131,7 +131,7 @@ public void testFlat2() throws SemanticException { } @Test - public void testFlat1() throws SemanticException { + public void testFlat1() throws HiveException { List> uniqSkewedElements = new ArrayList>(); List v1 = Arrays.asList("1", "2"); List v2 = Arrays.asList("3", "4"); diff --git 
ql/src/test/org/apache/hadoop/hive/ql/parse/TestEximUtil.java ql/src/test/org/apache/hadoop/hive/ql/parse/TestEximUtil.java index 108f8e5..8db9a03 100644 --- ql/src/test/org/apache/hadoop/hive/ql/parse/TestEximUtil.java +++ ql/src/test/org/apache/hadoop/hive/ql/parse/TestEximUtil.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.parse; import junit.framework.TestCase; +import org.apache.hadoop.hive.ql.metadata.HiveException; /** * TestEximUtil. @@ -34,7 +35,7 @@ protected void setUp() { protected void tearDown() { } - public void testCheckCompatibility() throws SemanticException { + public void testCheckCompatibility() throws HiveException { // backward/forward compatible EximUtil.doCheckCompatibility( @@ -61,7 +62,7 @@ public void testCheckCompatibility() throws SemanticException { null // data's FC version ); // No exceptions expected fail(); - } catch (SemanticException e) { + } catch (HiveException e) { } // not forward compatible @@ -72,7 +73,7 @@ public void testCheckCompatibility() throws SemanticException { null // data's FC version ); // No exceptions expected fail(); - } catch (SemanticException e) { + } catch (HiveException e) { } // forward compatible @@ -105,7 +106,7 @@ public void testCheckCompatibility() throws SemanticException { "10.3" // data's FC version ); // No exceptions expected fail(); - } catch (SemanticException e) { + } catch (HiveException e) { } } diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java index 01583c7..cfab6ae 100644 --- ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java +++ ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.BaseWork; import org.apache.hadoop.hive.ql.plan.FileSinkDesc; import org.apache.hadoop.hive.ql.plan.MapWork; @@ -79,7 +80,7 @@ public void setUp() throws Exception { proc = new GenTezWork() { @Override protected void setupMapWork(MapWork mapWork, GenTezProcContext context, - Operator root, String alias) throws SemanticException { + Operator root, String alias) throws HiveException { LinkedHashMap> map = new LinkedHashMap>(); map.put("foo", root); @@ -115,7 +116,7 @@ public void tearDown() throws Exception { } @Test - public void testCreateMap() throws SemanticException { + public void testCreateMap() throws HiveException { proc.process(rs, null, ctx, (Object[])null); assertNotNull(ctx.currentTask); @@ -143,7 +144,7 @@ public void testCreateMap() throws SemanticException { } @Test - public void testCreateReduce() throws SemanticException { + public void testCreateReduce() throws HiveException { // create map proc.process(rs, null, ctx, (Object[])null); diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBJoinTreeApplyPredicate.java ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBJoinTreeApplyPredicate.java index 9e77949..f96b1a1 100644 --- ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBJoinTreeApplyPredicate.java +++ ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBJoinTreeApplyPredicate.java @@ -23,6 +23,7 @@ import junit.framework.Assert; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.session.SessionState; import org.junit.Before; import 
@@ -41,7 +42,7 @@ public static void initialize() {
   }
 
   @Before
-  public void setup() throws SemanticException {
+  public void setup() throws HiveException {
     sA = new SemanticAnalyzer(conf);
   }
 
@@ -129,7 +130,7 @@ QBJoinTree createJoinTree(JoinType type,
 
   ASTNode applyEqPredicate(QBJoinTree jT,
       String lTbl, String lCol,
-      String rTbl, String rCol) throws SemanticException {
+      String rTbl, String rCol) throws HiveException {
     ASTNode joinCond = constructEqualityCond(lTbl, lCol, rTbl, rCol);
 
     ASTNode leftCondn = (ASTNode) joinCond.getChild(0);
@@ -152,7 +153,7 @@ ASTNode applyEqPredicate(QBJoinTree jT,
   }
 
   @Test
-  public void testSimpleCondn() throws SemanticException {
+  public void testSimpleCondn() throws HiveException {
     QBJoinTree jT = createJoinTree(JoinType.INNER, "a", null, "b");
     ASTNode joinCond = applyEqPredicate(jT, "a", "x", "b", "y");
     Assert.assertEquals(jT.getExpressions().get(0).get(0), joinCond.getChild(0));
@@ -160,7 +161,7 @@ public void testSimpleCondn() throws SemanticException {
   }
 
   @Test
-  public void test3WayJoin() throws SemanticException {
+  public void test3WayJoin() throws HiveException {
     QBJoinTree jT1 = createJoinTree(JoinType.INNER, "a", null, "b");
     QBJoinTree jT = createJoinTree(JoinType.INNER, "b", jT1, "c");
     ASTNode joinCond1 = applyEqPredicate(jT, "a", "x", "b", "y");
@@ -172,7 +173,7 @@ public void test3WayJoin() throws SemanticException {
   }
 
   @Test
-  public void test3WayJoinSwitched() throws SemanticException {
+  public void test3WayJoinSwitched() throws HiveException {
     QBJoinTree jT1 = createJoinTree(JoinType.INNER, "a", null, "b");
     QBJoinTree jT = createJoinTree(JoinType.INNER, "b", jT1, "c");
     ASTNode joinCond1 = applyEqPredicate(jT, "b", "y", "a", "x");
@@ -184,7 +185,7 @@ public void test3WayJoinSwitched() throws SemanticException {
   }
 
   @Test
-  public void test4WayJoin() throws SemanticException {
+  public void test4WayJoin() throws HiveException {
     QBJoinTree jT1 = createJoinTree(JoinType.INNER, "a", null, "b");
     QBJoinTree jT2 = createJoinTree(JoinType.INNER, "b", jT1, "c");
     QBJoinTree jT = createJoinTree(JoinType.INNER, "c", jT2, "d");
@@ -200,7 +201,7 @@ public void test4WayJoin() throws SemanticException {
   }
 
   @Test
-  public void test4WayJoinSwitched() throws SemanticException {
+  public void test4WayJoinSwitched() throws HiveException {
     QBJoinTree jT1 = createJoinTree(JoinType.INNER, "a", null, "b");
     QBJoinTree jT2 = createJoinTree(JoinType.INNER, "b", jT1, "c");
     QBJoinTree jT = createJoinTree(JoinType.INNER, "c", jT2, "d");
diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBSubQuery.java ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBSubQuery.java
index 7e57471..c3fbcaa 100644
--- ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBSubQuery.java
+++ ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBSubQuery.java
@@ -6,6 +6,7 @@
 import junit.framework.Assert;
 
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -34,7 +35,7 @@ public static void initialize() {
   }
 
   @Before
-  public void setup() throws SemanticException {
+  public void setup() throws HiveException {
     pd = new ParseDriver();
     sA = new SemanticAnalyzer(conf);
   }
diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/TestSemanticAnalyzer.java ql/src/test/org/apache/hadoop/hive/ql/parse/TestSemanticAnalyzer.java
index be1f7ff..e765c85 100644
--- ql/src/test/org/apache/hadoop/hive/ql/parse/TestSemanticAnalyzer.java
+++ ql/src/test/org/apache/hadoop/hive/ql/parse/TestSemanticAnalyzer.java
@@ -23,6 +23,7 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
 import org.junit.Test;
 
@@ -42,19 +43,19 @@ public void testNormalizeColSpec() throws Exception {
 
     try {
       checkNormalization("date", "foo", "", "foo"); // Bad format.
       fail("should throw");
-    } catch (SemanticException ex) {
+    } catch (HiveException ex) {
     }
 
     try {
       checkNormalization("date", "2010-01-01", "2010-01-01", "2010-01-01"); // Bad value type.
       fail("should throw");
-    } catch (SemanticException ex) {
+    } catch (HiveException ex) {
    }
   }
 
   public void checkNormalization(String colType, String originalColSpec,
-      String result, Object colValue) throws SemanticException {
+      String result, Object colValue) throws HiveException {
     final String colName = "col";
     Map<String, String> partSpec = new HashMap<String, String>();
     partSpec.put(colName, originalColSpec);
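Since org.apache.hadoop.hive.ql.parse.SemanticException is a subclass of org.apache.hadoop.hive.ql.metadata.HiveException, the hunks above only replace the narrower SemanticException with the broader HiveException in imports, throws clauses, and catch blocks; control flow in the tests is unchanged. A minimal sketch of why the widened catch blocks still trap the narrower exception is below (the demo class is hypothetical and not part of the patch; only the two Hive exception classes are real):

import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.parse.SemanticException;

// Hypothetical demo class, not part of the patch.
public class ExceptionWideningDemo {

  // Mirrors the updated test signatures: declares the broader HiveException...
  static void analyze(boolean fail) throws HiveException {
    if (fail) {
      // ...while callees may still throw the narrower SemanticException.
      throw new SemanticException("bad query");
    }
  }

  public static void main(String[] args) {
    try {
      analyze(true);
    } catch (HiveException e) {
      // A handler for HiveException also traps SemanticException, which is
      // why the relaxed try/catch blocks in the tests keep passing.
      System.out.println("caught " + e.getClass().getSimpleName());
    }
  }
}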