diff --git ql/src/java/org/apache/hadoop/hive/ql/Driver.java ql/src/java/org/apache/hadoop/hive/ql/Driver.java index 7549144766..342d463480 100644 --- ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -139,10 +139,6 @@ public Schema getSchema() { return driverContext.getSchema(); } - public Schema getExplainSchema() { - return new Schema(ExplainTask.getResultSchema(), null); - } - @Override public Context getContext() { return context; @@ -168,7 +164,7 @@ public Driver(HiveConf conf) { // Pass lineageState when a driver instantiates another Driver to run // or compile another query public Driver(HiveConf conf, Context ctx, LineageState lineageState) { - this(getNewQueryState(conf, lineageState), null); + this(QueryState.getNewQueryState(conf, lineageState), null); context = ctx; } @@ -185,18 +181,11 @@ public Driver(QueryState queryState, QueryInfo queryInfo, HiveTxnManager txnMana txnManager); } - /** - * Generating the new QueryState object. Making sure, that the new queryId is generated. - * @param conf The HiveConf which should be used - * @param lineageState a LineageState to be set in the new QueryState object - * @return The new QueryState object - */ - public static QueryState getNewQueryState(HiveConf conf, LineageState lineageState) { - return new QueryState.Builder() - .withGenerateNewQueryId(true) - .withHiveConf(conf) - .withLineageState(lineageState) - .build(); + public Driver(QueryState queryState, QueryInfo queryInfo, HiveTxnManager txnManager, + ValidWriteIdList compactionWriteIds, long compactorTxnId) { + this(queryState, queryInfo, txnManager); + driverContext.setCompactionWriteIds(compactionWriteIds); + driverContext.setCompactorTxnId(compactorTxnId); } /** @@ -1816,7 +1805,7 @@ private void releaseCachedResult() { // Close and release resources within a running query process. 
Since it runs under // driver state COMPILING, EXECUTING or INTERRUPT, it would not have race condition // with the releases probably running in the other closing thread. - public int closeInProcess(boolean destroyed) { + private int closeInProcess(boolean destroyed) { releaseTaskQueue(); releasePlan(); releaseCachedResult(); @@ -1930,9 +1919,4 @@ public boolean hasResultSet() { return driverContext.getPlan().getFetchTask() != null && driverContext.getPlan().getResultSchema() != null && driverContext.getPlan().getResultSchema().isSetFieldSchemas(); } - - void setCompactionWriteIds(ValidWriteIdList compactionWriteIds, long compactorTxnId) { - driverContext.setCompactionWriteIds(compactionWriteIds); - driverContext.setCompactorTxnId(compactorTxnId); - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/DriverUtils.java ql/src/java/org/apache/hadoop/hive/ql/DriverUtils.java index 1eacf69657..21e5f7228a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/DriverUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/DriverUtils.java @@ -64,8 +64,7 @@ public static void runOnDriver(HiveConf conf, String user, boolean isOk = false; try { QueryState qs = new QueryState.Builder().withHiveConf(conf).withGenerateNewQueryId(true).nonIsolated().build(); - Driver driver = new Driver(qs, null, null); - driver.setCompactionWriteIds(writeIds, compactorTxnId); + Driver driver = new Driver(qs, null, null, writeIds, compactorTxnId); try { try { driver.run(query); diff --git ql/src/java/org/apache/hadoop/hive/ql/QueryState.java ql/src/java/org/apache/hadoop/hive/ql/QueryState.java index 267f7d041f..280b7a4719 100644 --- ql/src/java/org/apache/hadoop/hive/ql/QueryState.java +++ ql/src/java/org/apache/hadoop/hive/ql/QueryState.java @@ -58,6 +58,7 @@ private long numModifiedRows = 0; static public final String USERID_TAG = "userid"; + /** * Private constructor, use QueryState.Builder instead. 
 * @param conf The query specific configuration object @@ -140,6 +141,20 @@ public static void setApplicationTag(HiveConf queryConf, String queryTag) { queryConf.set(TezConfiguration.TEZ_APPLICATION_TAGS, jobTag); } + /** + * Creates a new QueryState object, making sure that a new queryId is generated. + * @param conf The HiveConf which should be used + * @param lineageState a LineageState to be set in the new QueryState object + * @return The new QueryState object + */ + public static QueryState getNewQueryState(HiveConf conf, LineageState lineageState) { + return new QueryState.Builder() + .withGenerateNewQueryId(true) + .withHiveConf(conf) + .withLineageState(lineageState) + .build(); + } + /** * Builder to instantiate the QueryState object. */ diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 20b0ccd94b..1914089b41 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -2293,7 +2293,7 @@ private void analyzeAlterTableAddParts(TableName tName, CommonTree ast, boolean cmd.append(")"); } // FIXME: is it ok to have a completely new querystate? 
- QueryState newQueryState = Driver.getNewQueryState(conf, queryState.getLineageState()); + QueryState newQueryState = QueryState.getNewQueryState(conf, queryState.getLineageState()); // FIXME: this driver instance is never closed Driver driver = new Driver(newQueryState); int rc = driver.compile(cmd.toString(), false); diff --git ql/src/java/org/apache/hadoop/hive/ql/reexec/ReExecDriver.java ql/src/java/org/apache/hadoop/hive/ql/reexec/ReExecDriver.java index a32af75f8f..eab7f45fde 100644 --- ql/src/java/org/apache/hadoop/hive/ql/reexec/ReExecDriver.java +++ ql/src/java/org/apache/hadoop/hive/ql/reexec/ReExecDriver.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hive.ql.QueryInfo; import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.exec.ExplainTask; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.parse.ASTNode; @@ -252,8 +253,8 @@ public FetchTask getFetchTask() { @Override public Schema getSchema() { - if(explainReOptimization) { - return coreDriver.getExplainSchema(); + if (explainReOptimization) { + return new Schema(ExplainTask.getResultSchema(), null); } return coreDriver.getSchema(); }