Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
===================================================================
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java	(revision 822463)
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java	(working copy)
@@ -24,6 +24,7 @@
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Properties;
+import java.util.Map.Entry;
 
 import javax.security.auth.login.LoginException;
 
@@ -93,6 +94,9 @@
     // session identifier
     HIVESESSIONID("hive.session.id", ""),
 
+    // whether the session is a dry run
+    HIVESESSIONDRYRUN("hive.session.dryrun", false),
+
     // query being executed (multiple per session)
     HIVEQUERYSTRING("hive.query.string", ""),
 
@@ -425,4 +429,17 @@
   public static String getColumnInternalName(int pos){
     return "_col"+pos;
   }
+
+  /**
+   * Clone the configuration.
+   */
+  @Override
+  public Object clone() {
+    HiveConf newConf = new HiveConf();
+    for(Entry<Object, Object> ent: this.getAllProperties().entrySet()) {
+      newConf.set((String)ent.getKey(), (String)ent.getValue());
+    }
+
+    return newConf;
+  }
 }
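[Editor's note -- illustration only, not part of the patch. The new HiveConf.clone() above is a property-by-property copy into a fresh HiveConf, so a caller can retarget the copy without mutating the live session configuration (Hive.getSessionMSC() later in this patch relies on exactly that). The class name and the "my.test.key" property are invented for this sketch:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class CloneConfSketch {
      public static void main(String[] args) {
        HiveConf base = new HiveConf(CloneConfSketch.class);
        HiveConf copy = (HiveConf) base.clone();   // copies every property

        // Mutating the copy leaves the original untouched.
        copy.set("my.test.key", "copy-only");      // hypothetical key, illustration only
        System.out.println(base.get("my.test.key", "<unset>"));  // prints "<unset>"
      }
    }
]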
Index: metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
===================================================================
--- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java	(revision 822463)
+++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java	(working copy)
@@ -48,7 +48,7 @@
     super.setUp();
     hiveConf = new HiveConf(this.getClass());
     try {
-      client = new HiveMetaStoreClient(hiveConf);
+      client = new HiveMetaStoreClient("hive test metastore", hiveConf);
     } catch (Throwable e) {
       System.err.println("Unable to open the metastore");
       System.err.println(StringUtils.stringifyException(e));
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(revision 822463)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(working copy)
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.hive.metastore;
 
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -66,11 +67,10 @@
   public static class HMSHandler extends FacebookBase implements ThriftHiveMetastore.Iface {
     public static final Log LOG = LogFactory.getLog(HiveMetaStore.class.getName());
-    private static boolean createDefaultDB = false;
     private String rawStoreClassName;
     private HiveConf hiveConf; // stores datastore (jpox) properties, right now they come from jpox.properties
     private Warehouse wh; // hdfs warehouse
-    private ThreadLocal<RawStore> threadLocalMS = new ThreadLocal<RawStore>() {
+    private ThreadLocal<HashMap<String, RawStore>> threadLocalMS = new ThreadLocal<HashMap<String, RawStore>>() {
       protected synchronized Object initialValue() {
         return null;
       }
@@ -124,12 +124,17 @@
      * @throws MetaException
      */
     private RawStore getMS() throws MetaException {
-      RawStore ms = threadLocalMS.get();
+      HashMap<String, RawStore> msMap = threadLocalMS.get();
+      if (msMap == null) {
+        msMap = new HashMap<String, RawStore>();
+        threadLocalMS.set(msMap);
+      }
+
+      RawStore ms = msMap.get(this.getName());
       if(ms == null) {
         LOG.info(threadLocalId.get() + ": Opening raw store with implemenation class:" + rawStoreClassName);
         ms = (RawStore) ReflectionUtils.newInstance(getClass(rawStoreClassName, RawStore.class), hiveConf);
-        threadLocalMS.set(ms);
-        ms = threadLocalMS.get();
+        msMap.put(this.getName(), ms);
       }
       return ms;
     }
@@ -139,7 +144,7 @@
      * @throws MetaException
      */
     private void createDefaultDB() throws MetaException {
-      if(HMSHandler.createDefaultDB || !checkForDefaultDb) {
+      if(!checkForDefaultDb) {
         return;
       }
       try {
@@ -148,7 +153,6 @@
         getMS().createDatabase(new Database(MetaStoreUtils.DEFAULT_DATABASE_NAME,
             wh.getDefaultDatabasePath(MetaStoreUtils.DEFAULT_DATABASE_NAME).toString()));
       }
-      HMSHandler.createDefaultDB = true;
     }
 
     private Class<?> getClass(String rawStoreClassName, Class<?> class1) throws MetaException {
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java	(revision 822463)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java	(working copy)
@@ -63,7 +63,7 @@
 
   static final private Log LOG = LogFactory.getLog("hive.metastore");
 
-  public HiveMetaStoreClient(HiveConf conf) throws MetaException {
+  public HiveMetaStoreClient(String name, HiveConf conf) throws MetaException {
     if(conf == null) {
       conf = new HiveConf(HiveMetaStoreClient.class);
     }
@@ -71,7 +71,7 @@
     boolean localMetaStore = conf.getBoolean("hive.metastore.local", false);
     if(localMetaStore) {
       // instantiate the metastore server handler directly instead of connecting through the network
-      client = new HiveMetaStore.HMSHandler("hive client", conf);
+      client = new HiveMetaStore.HMSHandler(name, conf);
       this.open = true;
       return;
     }
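[Editor's note -- illustration only, not part of the patch. The name argument added to HiveMetaStoreClient reaches HMSHandler, whose getName() the reworked getMS() above uses as the key into its thread-local RawStore map. The upshot is that two embedded clients in one process no longer share (and clobber) a single RawStore. A minimal sketch, assuming an embedded (hive.metastore.local=true) setup; the class name is invented:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;

    public class NamedClientSketch {
      public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf(NamedClientSketch.class);
        conf.set("hive.metastore.local", "true");  // embedded HMSHandler, no server

        // Distinct names select distinct RawStore entries in getMS()'s map.
        IMetaStoreClient regular = new HiveMetaStoreClient("hive metastore", conf);
        IMetaStoreClient session = new HiveMetaStoreClient("hive session metastore", conf);
      }
    }
]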
Index: ql/src/test/results/clientpositive/session_test.q.out
===================================================================
--- ql/src/test/results/clientpositive/session_test.q.out	(revision 0)
+++ ql/src/test/results/clientpositive/session_test.q.out	(revision 0)
@@ -0,0 +1,190 @@
+PREHOOK: query: CREATE TABLE session_test1(KEY STRING, VALUE STRING) PARTITIONED BY(ds DATETIME) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE session_test1(KEY STRING, VALUE STRING) PARTITIONED BY(ds DATETIME) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@session_test1
+PREHOOK: query: EXPLAIN
+SELECT * FROM (
+  SELECT key, value FROM src
+  UNION ALL
+  SELECT key, value FROM session_test1) a
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT * FROM (
+  SELECT key, value FROM src
+  UNION ALL
+  SELECT key, value FROM session_test1) a
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_UNION (TOK_QUERY (TOK_FROM (TOK_TABREF src)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))))) (TOK_QUERY (TOK_FROM (TOK_TABREF session_test1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value)))))) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        null-subquery1:a-subquery1:src
+          TableScan
+            alias: src
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: _col0, _col1
+              Union
+                Select Operator
+                  expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+                  outputColumnNames: _col0, _col1
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+        null-subquery2:a-subquery2:session_test1
+          TableScan
+            alias: session_test1
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: _col0, _col1
+              Union
+                Select Operator
+                  expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+                  outputColumnNames: _col0, _col1
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: SELECT src.key, session_test1.value FROM src JOIN session_test1 ON (src.key = session_test1.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: file:/data/users/athusoo/commits/hive_trunk_ws9/build/ql/tmp/784245638/10000
+POSTHOOK: query: SELECT src.key, session_test1.value FROM src JOIN session_test1 ON (src.key = session_test1.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: file:/data/users/athusoo/commits/hive_trunk_ws9/build/ql/tmp/784245638/10000
+ABSTRACT SYNTAX TREE:
+  (TOK_FROM (TOK_JOIN (TOK_TABREF src) (TOK_TABREF session_test1) (= (. (TOK_TABLE_OR_COL src) key) (. (TOK_TABLE_OR_COL session_test1) key))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        src
+          TableScan
+            alias: src
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+              tag: 0
+              value expressions:
+                    expr: key
+                    type: string
+        session_test1
+          TableScan
+            alias: session_test1
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+              tag: 1
+              value expressions:
+                    expr: value
+                    type: string
+      Needs Tagging: true
+      Path -> Alias:
+        file:/data/users/athusoo/commits/hive_trunk_ws9/build/ql/test/data/warehouse/src [src]
+      Path -> Partition:
+        file:/data/users/athusoo/commits/hive_trunk_ws9/build/ql/test/data/warehouse/src
+          Partition
+
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                name src
+                columns.types string:string
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                columns key,value
+                bucket_count -1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                location file:/data/users/athusoo/commits/hive_trunk_ws9/build/ql/test/data/warehouse/src
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: src
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          condition expressions:
+            0 {VALUE._col0}
+            1 {VALUE._col1}
+          outputColumnNames: _col0, _col3
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: string
+                  expr: _col3
+                  type: string
+            outputColumnNames: _col0, _col1
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              directory: file:/data/users/athusoo/commits/hive_trunk_ws9/build/ql/tmp/784245638/10001
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    columns _col0,_col1
+                    serialization.format 1
+                    columns.types string:string
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: DROP TABLE session_test1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE session_test1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: default@session_test1
Index: ql/src/test/queries/clientpositive/session_test.q
===================================================================
--- ql/src/test/queries/clientpositive/session_test.q	(revision 0)
+++ ql/src/test/queries/clientpositive/session_test.q	(revision 0)
@@ -0,0 +1,13 @@
+set hive.session.dryrun=true;
+
+CREATE TABLE session_test1(KEY STRING, VALUE STRING) PARTITIONED BY(ds DATETIME) STORED AS TEXTFILE;
+
+EXPLAIN
+SELECT * FROM (
+  SELECT key, value FROM src
+  UNION ALL
+  SELECT key, value FROM session_test1) a;
+
+SELECT src.key, session_test1.value FROM src JOIN session_test1 ON (src.key = session_test1.key);
+
+DROP TABLE session_test1;
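[Editor's note -- illustration only, not part of the patch. A rough Java equivalent of session_test.q above, using the stock Driver and SessionState APIs: with hive.session.dryrun=true, DDL lands in the session metastore and each query is compiled into an extended-explain task instead of being executed, so getResults() (see the Driver.java hunk at the end of this patch) hands back no rows. The class name and queries are invented for this sketch:

    import java.util.Vector;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.Driver;
    import org.apache.hadoop.hive.ql.session.SessionState;

    public class DryRunSketch {
      public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf(SessionState.class);
        conf.setBoolVar(HiveConf.ConfVars.HIVESESSIONDRYRUN, true);
        SessionState.start(new SessionState(conf));

        Driver drv = new Driver(conf);
        drv.run("CREATE TABLE dryrun_t1(key STRING, value STRING)"); // session store only
        drv.run("SELECT count(1) FROM dryrun_t1");                   // planned, not executed

        Vector<String> res = new Vector<String>();
        System.out.println(drv.getResults(res));  // false: fetch is short-circuited under dryrun
      }
    }
]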
Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java	(revision 822463)
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java	(working copy)
@@ -67,6 +67,11 @@
   private Class<? extends InputFormat> inputFormatClass;
   private Class<? extends HiveOutputFormat> outputFormatClass;
   private org.apache.hadoop.hive.metastore.api.Table tTable;
+
+  /**
+   * Flag to decide whether the table is a session specific temporary table or not.
+   */
+  private boolean isTemp;
 
   /**
    * Table (only used internally)
@@ -602,4 +607,21 @@
     newTbl.outputFormatClass = this.outputFormatClass;
     return newTbl;
   }
+  /**
+   * Returns whether the table is a session temporary table or not.
+   *
+   * @return true if this is a temporary table.
+   */
+  public boolean isTempTable() {
+    return isTemp;
+  }
+
+  /**
+   * Sets the session temporary flag for the table.
+   *
+   * @param isTemp true if this is a session temporary table, false otherwise.
+   */
+  public void setTemp(boolean isTemp) {
+    this.isTemp = isTemp;
+  }
 };
Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java	(revision 822463)
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java	(working copy)
@@ -40,6 +40,7 @@
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
@@ -67,6 +68,8 @@
 
   static final private Log LOG = LogFactory.getLog("hive.ql.metadata.Hive");
 
+  static IMetaStoreClient sessionMSC = null;
+
   private HiveConf conf = null;
   private IMetaStoreClient metaStoreClient;
 
@@ -222,7 +225,11 @@
       Table newTbl) throws InvalidOperationException, HiveException {
     try {
-      getMSC().alter_table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, newTbl.getTTable());
+      if (newTbl.isTempTable()) {
+        getSessionMSC().alter_table(newTbl.getDbName(), tblName, newTbl.getTTable());
+      } else {
+        getMSC().alter_table(newTbl.getDbName(), tblName, newTbl.getTTable());
+      }
     } catch (MetaException e) {
       throw new HiveException("Unable to alter table.", e);
     } catch (TException e) {
@@ -252,7 +259,13 @@
         tbl.setFields(MetaStoreUtils.getFieldsFromDeserializer(tbl.getName(), tbl.getDeserializer()));
       }
       tbl.checkValidity();
-      getMSC().createTable(tbl.getTTable());
+      if (this.conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONDRYRUN)) {
+        tbl.setTemp(true);
+        getSessionMSC().createTable(tbl.getTTable());
+      } else {
+        tbl.setTemp(false);
+        getMSC().createTable(tbl.getTTable());
+      }
     } catch (AlreadyExistsException e) {
       if (!ifNotExists) {
         throw new HiveException(e);
@@ -285,7 +298,19 @@
       boolean ignoreUnknownTab) throws HiveException {
 
     try {
-      getMSC().dropTable(dbName, tableName, deleteData, ignoreUnknownTab);
+      boolean tableDropped = false;
+      if (this.conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONDRYRUN)) {
+        try {
+          getSessionMSC().dropTable(dbName, tableName, deleteData, false);
+          tableDropped = true;
+        }
+        catch (NoSuchObjectException e) {
+          // Ignore if the table is not found
+        }
+      }
+
+      if (!tableDropped)
+        getMSC().dropTable(dbName, tableName, deleteData, ignoreUnknownTab);
     } catch (NoSuchObjectException e) {
       if (!ignoreUnknownTab) {
         throw new HiveException(e);
@@ -329,9 +354,21 @@
       throw new HiveException("empty table creation??");
     }
     Table table = new Table();
+    boolean isTemp = false;
     org.apache.hadoop.hive.metastore.api.Table tTable = null;
     try {
-      tTable = getMSC().getTable(dbName, tableName);
+      if (this.conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONDRYRUN)) {
+        try {
+          tTable = getSessionMSC().getTable(dbName, tableName);
+          isTemp = true;
+        }
+        catch (NoSuchObjectException e) {
+          // Ignore any no such object exception
+        }
+      }
+
+      if (tTable == null)
+        tTable = getMSC().getTable(dbName, tableName);
     } catch (NoSuchObjectException e) {
       if(throwException) {
         LOG.error(StringUtils.stringifyException(e));
@@ -361,6 +398,7 @@
       Properties p = MetaStoreUtils.getSchema(tTable);
       table.setSchema(p);
       table.setTTable(tTable);
+      table.setTemp(isTemp);
       table.setInputFormatClass((Class<? extends InputFormat>) Class.forName(
           table.getSchema().getProperty(org.apache.hadoop.hive.metastore.api.Constants.FILE_INPUT_FORMAT,
               org.apache.hadoop.mapred.SequenceFileInputFormat.class.getName()),
@@ -416,7 +454,19 @@
    */
   public List<String> getTablesForDb(String database, String tablePattern) throws HiveException {
     try {
-      return getMSC().getTables(database, tablePattern);
+      List<String> retList = null;
+      if (this.conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONDRYRUN)) {
+        retList = getSessionMSC().getTables(database, tablePattern);
+      }
+
+      List<String> moreRetList = getMSC().getTables(database, tablePattern);
+
+      if (retList != null)
+        retList.addAll(moreRetList);
+      else
+        retList = moreRetList;
+
+      return retList;
     } catch(Exception e) {
       throw new HiveException(e);
     }
@@ -433,7 +483,7 @@
    */
   protected boolean createDatabase(String name, String locationUri)
       throws AlreadyExistsException, MetaException, TException {
-    return getMSC().createDatabase(name, locationUri);
+    return getMSC().createDatabase(name, locationUri);
   }
 
@@ -556,7 +606,12 @@
 
     try {
       Partition tmpPart = new Partition(tbl, partSpec, location);
-      partition = getMSC().add_partition(tmpPart.getTPartition());
+      if (tbl.isTempTable()) {
+        partition = getSessionMSC().add_partition(tmpPart.getTPartition());
+      }
+      else {
+        partition = getMSC().add_partition(tmpPart.getTPartition());
+      }
     } catch (Exception e) {
       LOG.error(StringUtils.stringifyException(e));
       throw new HiveException(e);
@@ -574,7 +629,7 @@
    * @throws HiveException
    */
   public Partition getPartition(Table tbl, Map<String, String> partSpec, boolean forceCreate)
-      throws HiveException {
+      throws HiveException {
     if(!tbl.isValidSpec(partSpec)) {
       throw new HiveException("Invalid partition: " + partSpec);
     }
@@ -588,14 +643,25 @@
     }
     org.apache.hadoop.hive.metastore.api.Partition tpart = null;
     try {
-      tpart = getMSC().getPartition(tbl.getDbName(), tbl.getName(), pvals);
-      if(tpart == null && forceCreate) {
-        LOG.debug("creating partition for table " + tbl.getName() + " with partition spec : " + partSpec);
-        tpart = getMSC().appendPartition(tbl.getDbName(), tbl.getName(), pvals);;
+      if (tbl.isTempTable()) {
+        tpart = getSessionMSC().getPartition(tbl.getDbName(), tbl.getName(), pvals);
+        if (tpart == null && forceCreate) {
+          LOG.debug("creating session level partition for table " + tbl.getName() + " with partition spec : " + partSpec);
+          tpart = getSessionMSC().appendPartition(tbl.getDbName(), tbl.getName(), pvals);
+        }
+        if(tpart == null){
+          return null;
+        }
+      } else {
+        tpart = getMSC().getPartition(tbl.getDbName(), tbl.getName(), pvals);
+        if(tpart == null && forceCreate) {
+          LOG.debug("creating partition for table " + tbl.getName() + " with partition spec : " + partSpec);
+          tpart = getMSC().appendPartition(tbl.getDbName(), tbl.getName(), pvals);
+        }
+        if(tpart == null){
+          return null;
+        }
       }
-      if(tpart == null){
-        return null;
-      }
     } catch (Exception e) {
       LOG.error(StringUtils.stringifyException(e));
       throw new HiveException(e);
@@ -606,6 +672,15 @@
 
   public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals, boolean deleteData) throws HiveException {
     try {
+      if (this.conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONDRYRUN)) {
+        try {
+          return getSessionMSC().dropPartition(db_name, tbl_name, part_vals, deleteData);
+        }
+        catch (NoSuchObjectException e) {
+          // Ignore no such object exception
+        }
+      }
+
       return getMSC().dropPartition(db_name, tbl_name, part_vals, deleteData);
     } catch (NoSuchObjectException e) {
       throw new HiveException("Partition or table doesn't exist.", e);
@@ -615,9 +690,14 @@
 
   public List<String> getPartitionNames(String dbName, String tblName, short max) throws HiveException {
-    List names = null;
+    List<String> names = null;
     try {
-      names = getMSC().listPartitionNames(dbName, tblName, max);
+      if (this.conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONDRYRUN)) {
+        names = getSessionMSC().listPartitionNames(dbName, tblName, max);
+      }
+      if (names == null) {
+        names = getMSC().listPartitionNames(dbName, tblName, max);
+      }
     } catch (Exception e) {
       LOG.error(StringUtils.stringifyException(e));
       throw new HiveException(e);
@@ -633,9 +713,13 @@
    */
   public List<Partition> getPartitions(Table tbl) throws HiveException {
     if(tbl.isPartitioned()) {
-      List<org.apache.hadoop.hive.metastore.api.Partition> tParts;
+      List<org.apache.hadoop.hive.metastore.api.Partition> tParts = null;
       try {
-        tParts = getMSC().listPartitions(tbl.getDbName(), tbl.getName(), (short) -1);
+        if (tbl.isTempTable()) {
+          tParts = getSessionMSC().listPartitions(tbl.getDbName(), tbl.getName(), (short) -1);
+        } else {
+          tParts = getMSC().listPartitions(tbl.getDbName(), tbl.getName(), (short) -1);
+        }
       } catch (Exception e) {
         LOG.error(StringUtils.stringifyException(e));
         throw new HiveException(e);
@@ -792,7 +876,7 @@
    * @throws HiveMetaException if a working client can't be created
    */
   private IMetaStoreClient createMetaStoreClient() throws MetaException {
-    return new HiveMetaStoreClient(this.conf);
+    return new HiveMetaStoreClient("hive metastore", this.conf);
   }
 
@@ -807,6 +891,28 @@
     return metaStoreClient;
   }
 
+  private synchronized IMetaStoreClient getSessionMSC() throws MetaException {
+    if (sessionMSC == null) {
+      // Create an embedded derby instance and set hive configuration variables so that
+      // a local metastore client is created that talks to this derby instance.
+
+      // Clone the configuration
+      HiveConf localConf = (HiveConf) conf.clone();
+
+      // set certain specific fields
+      localConf.set("javax.jdo.option.ConnectionURL", "jdbc:derby:memory:SessionDB;create=true");
+      localConf.set("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver");
+      localConf.set("javax.jdo.option.ConnectionUserName", "APP");
+      localConf.set("javax.jdo.option.ConnectionPassword", "mine");
+      localConf.set("hive.metastore.local", "true");
+      localConf.set("hive.metastore.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore");
+
+      sessionMSC = new HiveMetaStoreClient("hive session metastore", localConf);
+    }
+
+    return sessionMSC;
+  }
+
   public static List<FieldSchema> getFieldsFromDeserializer(String name, Deserializer serde) throws HiveException {
     try {
       return MetaStoreUtils.getFieldsFromDeserializer(name, serde);
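[Editor's note -- illustration only, not part of the patch. One user-visible effect of the getTablesForDb() change above: in a dry-run session, listings merge both stores, with session (temporary) tables returned ahead of the shared metastore's tables. The class name and pattern are invented for this sketch:

    import java.util.List;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.session.SessionState;

    public class ListTablesSketch {
      public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf(SessionState.class);
        conf.setBoolVar(HiveConf.ConfVars.HIVESESSIONDRYRUN, true);

        Hive db = Hive.get(conf);
        // Session tables first, then shared tables, e.g. [session_test1, src, ...]
        List<String> tables = db.getTablesForDb("default", ".*");
        System.out.println(tables);
      }
    }
]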
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java	(revision 822463)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java	(working copy)
@@ -4660,6 +4660,15 @@
     // from which we want to find the reduce operator
     genMapRedTasks(qb);
 
+    if (conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONDRYRUN) &&
+        !ctx.getExplain()) {
+      ArrayList<Task<? extends Serializable>> newRootTasks =
+        new ArrayList<Task<? extends Serializable>>();
+
+      ExplainSemanticAnalyzer.addExplainTask(ctx, this, newRootTasks,
+          true, conf, ast);
+      rootTasks = newRootTasks;
+    }
     LOG.info("Completed plan generation");
 
     return;
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java	(revision 822463)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java	(working copy)
@@ -36,6 +36,39 @@
     super(conf);
   }
 
+  /**
+   * Function to add an explain task. This is called from the
+   * ExplainSemanticAnalyzer and also from the SemanticAnalyzer in case
+   * hive.session.dryrun is set to true.
+   *
+   * @param ctx The context
+   * @param sem The base semantic analyzer
+   * @param rTasks The root task list
+   * @param extended Whether extended explain is needed or not
+   * @param conf HiveConf
+   * @param ast The parse tree
+   */
+  public static void addExplainTask(Context ctx, BaseSemanticAnalyzer sem,
+      List<Task<? extends Serializable>> rTasks, boolean extended,
+      HiveConf conf, ASTNode ast) {
+
+    ctx.setResFile(new Path(ctx.getLocalTmpFileURI()));
+    List<Task<? extends Serializable>> tasks = sem.getRootTasks();
+    Task<? extends Serializable> fetchTask = sem.getFetchTask();
+    if (tasks == null) {
+      if (fetchTask != null) {
+        tasks = new ArrayList<Task<? extends Serializable>>();
+        tasks.add(fetchTask);
+      }
+    }
+    else if (fetchTask != null)
+      tasks.add(fetchTask);
+
+    rTasks.add(TaskFactory.get(new explainWork(ctx.getResFile(), tasks,
+        ((ASTNode)ast.getChild(0)).toStringTree(),
+        extended), conf));
+  }
+
   public void analyzeInternal(ASTNode ast) throws SemanticException {
     ctx.setExplain(true);
 
@@ -47,21 +80,8 @@
     if (ast.getChildCount() > 1) {
       extended = true;
     }
-
-    ctx.setResFile(new Path(ctx.getLocalTmpFileURI()));
-    List<Task<? extends Serializable>> tasks = sem.getRootTasks();
-    Task<? extends Serializable> fetchTask = sem.getFetchTask();
-    if (tasks == null) {
-      if (fetchTask != null) {
-        tasks = new ArrayList<Task<? extends Serializable>>();
-        tasks.add(fetchTask);
-      }
-    }
-    else if (fetchTask != null)
-      tasks.add(fetchTask);
-
-    rootTasks.add(TaskFactory.get(new explainWork(ctx.getResFile(), tasks,
-        ((ASTNode)ast.getChild(0)).toStringTree(),
-        extended), this.conf));
+
+    ExplainSemanticAnalyzer.addExplainTask(ctx, sem, rootTasks,
+        extended, conf, ast);
   }
 }
Index: ql/src/java/org/apache/hadoop/hive/ql/Driver.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/Driver.java	(revision 822463)
+++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java	(working copy)
@@ -519,7 +519,8 @@
   }
 
   public boolean getResults(Vector<String> res) throws IOException {
-    if (plan != null && plan.getPlan().getFetchTask() != null) {
+    if (plan != null && plan.getPlan().getFetchTask() != null &&
+        !conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONDRYRUN)) {
       BaseSemanticAnalyzer sem = plan.getPlan();
       if (!sem.getFetchTaskInit()) {
         sem.setFetchTaskInit(true);