Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
===================================================================
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java	(revision 807442)
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java	(working copy)
@@ -24,6 +24,7 @@
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Properties;
+import java.util.Map.Entry;
 
 import javax.security.auth.login.LoginException;
 
@@ -91,6 +92,7 @@
 
     // session identifier
     HIVESESSIONID("hive.session.id", ""),
+    HIVESESSIONTEST("hive.session.test", false),
 
     // query being executed (multiple per session)
     HIVEQUERYSTRING("hive.query.string", ""),
@@ -421,4 +423,17 @@
   public static String getColumnInternalName(int pos){
     return "_col"+pos;
   }
+
+  /**
+   * Clone the configuration.
+   */
+  @Override
+  public Object clone() {
+    HiveConf newConf = new HiveConf();
+    for (Entry<Object, Object> ent : this.getAllProperties().entrySet()) {
+      newConf.set((String) ent.getKey(), (String) ent.getValue());
+    }
+
+    return newConf;
+  }
 }
Index: metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
===================================================================
--- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java	(revision 807442)
+++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java	(working copy)
@@ -48,7 +48,7 @@
     super.setUp();
     hiveConf = new HiveConf(this.getClass());
     try {
-      client = new HiveMetaStoreClient(hiveConf);
+      client = new HiveMetaStoreClient("hive test metastore", hiveConf);
     } catch (Throwable e) {
       System.err.println("Unable to open the metastore");
       System.err.println(StringUtils.stringifyException(e));
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(revision 807442)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(working copy)
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.hive.metastore;
 
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -66,11 +67,10 @@
   public static class HMSHandler extends FacebookBase implements ThriftHiveMetastore.Iface {
     public static final Log LOG = LogFactory.getLog(HiveMetaStore.class.getName());
-    private static boolean createDefaultDB = false;
     private String rawStoreClassName;
     private HiveConf hiveConf; // stores datastore (jpox) properties, right now they come from jpox.properties
     private Warehouse wh; // hdfs warehouse
-    private ThreadLocal threadLocalMS = new ThreadLocal() {
+    private ThreadLocal<HashMap<String, RawStore>> threadLocalMS = new ThreadLocal() {
       protected synchronized Object initialValue() {
         return null;
       }
@@ -124,12 +124,17 @@
      * @throws MetaException
      */
    private RawStore getMS() throws MetaException {
-      RawStore ms = threadLocalMS.get();
+      HashMap<String, RawStore> msMap = threadLocalMS.get();
+      if (msMap == null) {
+        msMap = new HashMap<String, RawStore>();
+        threadLocalMS.set(msMap);
+      }
+
+      RawStore ms = msMap.get(this.getName());
      if(ms == null) {
        LOG.info(threadLocalId.get() + ": Opening raw store with implementation class:" + rawStoreClassName);
        ms = (RawStore) ReflectionUtils.newInstance(getClass(rawStoreClassName, RawStore.class), hiveConf);
-        threadLocalMS.set(ms);
-        ms = threadLocalMS.get();
+        msMap.put(this.getName(), ms);
      }
      return ms;
    }
@@ -139,7 +144,7 @@
      * @throws MetaException
      */
    private void createDefaultDB() throws MetaException {
-      if(HMSHandler.createDefaultDB || !checkForDefaultDb) {
+      if(!checkForDefaultDb) {
        return;
      }
      try {
@@ -148,7 +153,6 @@
        getMS().createDatabase(new Database(MetaStoreUtils.DEFAULT_DATABASE_NAME,
            wh.getDefaultDatabasePath(MetaStoreUtils.DEFAULT_DATABASE_NAME).toString()));
      }
-      HMSHandler.createDefaultDB = true;
    }
 
    private Class getClass(String rawStoreClassName, Class class1) throws MetaException {
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java	(revision 807442)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java	(working copy)
@@ -63,7 +63,7 @@
 
  static final private Log LOG = LogFactory.getLog("hive.metastore");
 
-  public HiveMetaStoreClient(HiveConf conf) throws MetaException {
+  public HiveMetaStoreClient(String name, HiveConf conf) throws MetaException {
    if(conf == null) {
      conf = new HiveConf(HiveMetaStoreClient.class);
    }
@@ -71,7 +71,7 @@
    boolean localMetaStore = conf.getBoolean("hive.metastore.local", false);
    if(localMetaStore) {
      // instantiate the metastore server handler directly instead of connecting through the network
-      client = new HiveMetaStore.HMSHandler("hive client", conf);
+      client = new HiveMetaStore.HMSHandler(name, conf);
      this.open = true;
      return;
    }
Index: ql/src/test/results/clientpositive/session_test.q.out
===================================================================
--- ql/src/test/results/clientpositive/session_test.q.out	(revision 0)
+++ ql/src/test/results/clientpositive/session_test.q.out	(revision 0)
@@ -0,0 +1,170 @@
+query: CREATE TABLE session_test1(KEY STRING, VALUE STRING) PARTITIONED BY(ds DATETIME) STORED AS TEXTFILE
+query: EXPLAIN
+SELECT * FROM (
+  SELECT key, value FROM src
+  UNION ALL
+  SELECT key, value FROM session_test1) a
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_UNION (TOK_QUERY (TOK_FROM (TOK_TABREF src)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))))) (TOK_QUERY (TOK_FROM (TOK_TABREF session_test1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value)))))) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        null-subquery1:a-subquery1:src
+          TableScan
+            alias: src
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: _col0, _col1
+              Union
+                Select Operator
+                  expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+                  outputColumnNames: _col0, _col1
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+        null-subquery2:a-subquery2:session_test1
+          TableScan
+            alias: session_test1
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: _col0, _col1
+              Union
+                Select Operator
+                  expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+                  outputColumnNames: _col0, _col1
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+query: SELECT src.key, session_test1.value FROM src JOIN session_test1 ON (src.key = session_test1.key)
+Input: default/src
+Output: file:/data/users/athusoo/commits/hive_trunk_ws9/build/ql/tmp/1662989415/10000
+ABSTRACT SYNTAX TREE:
+  (TOK_FROM (TOK_JOIN (TOK_TABREF src) (TOK_TABREF session_test1) (= (. (TOK_TABLE_OR_COL src) key) (. (TOK_TABLE_OR_COL session_test1) key))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        src
+          TableScan
+            alias: src
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+              tag: 0
+              value expressions:
+                    expr: key
+                    type: string
+        session_test1
+          TableScan
+            alias: session_test1
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+              tag: 1
+              value expressions:
+                    expr: value
+                    type: string
+      Needs Tagging: true
+      Path -> Alias:
+        file:/data/users/athusoo/commits/hive_trunk_ws9/build/ql/test/data/warehouse/src [src]
+      Path -> Partition:
+        file:/data/users/athusoo/commits/hive_trunk_ws9/build/ql/test/data/warehouse/src
+          Partition
+
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              name src
+              columns.types string:string
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              columns key,value
+              bucket_count -1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              file.inputformat org.apache.hadoop.mapred.TextInputFormat
+              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              location file:/data/users/athusoo/commits/hive_trunk_ws9/build/ql/test/data/warehouse/src
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            name: src
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          condition expressions:
+            0 {VALUE._col0}
+            1 {VALUE._col1}
+          outputColumnNames: _col0, _col3
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: string
+                  expr: _col3
+                  type: string
+            outputColumnNames: _col0, _col1
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              directory: file:/data/users/athusoo/commits/hive_trunk_ws9/build/ql/tmp/1662989415/10001
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    columns _col0,_col1
+                    serialization.format 1
+                    columns.types string:string
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+query: DROP TABLE session_test1
Index: ql/src/test/queries/clientpositive/session_test.q
===================================================================
--- ql/src/test/queries/clientpositive/session_test.q	(revision 0)
+++ ql/src/test/queries/clientpositive/session_test.q	(revision 0)
@@ -0,0 +1,13 @@
+set hive.session.test=true;
+
+CREATE TABLE session_test1(KEY STRING, VALUE STRING) PARTITIONED BY(ds DATETIME) STORED AS TEXTFILE;
+
+EXPLAIN
+SELECT * FROM (
+  SELECT key, value FROM src
+  UNION ALL
+  SELECT key, value FROM session_test1) a;
+
+SELECT src.key, session_test1.value FROM src JOIN session_test1 ON (src.key = session_test1.key);
+
+DROP TABLE session_test1;
Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java	(revision 807442)
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java	(working copy)
@@ -40,6 +40,7 @@
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
@@ -68,6 +69,8 @@
  static final private Log LOG = LogFactory.getLog("hive.ql.metadata.Hive");
 
  static Hive db = null;
+  static IMetaStoreClient sessionMSC = null;
+
  private HiveConf conf = null;
  private ThreadLocal threadLocalMSC = new ThreadLocal() {
    protected synchronized Object initialValue() {
@@ -219,7 +222,20 @@
      Table newTbl) throws InvalidOperationException, HiveException {
    try {
-      getMSC().alter_table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, newTbl.getTTable());
+      boolean tableAltered = false;
+      if (this.conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONTEST)) {
+        try {
+          getSessionMSC().alter_table(newTbl.getDbName(), tblName, newTbl.getTTable());
+          tableAltered = true;
+        }
+        catch (MetaException e) {
+          // Ignore any meta exceptions
+          // TODO Possibly have different meta exceptions and only ignore the ones that correspond to table
+          // not found
+        }
+      }
+      if (!tableAltered)
+        getMSC().alter_table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, newTbl.getTTable());
    } catch (MetaException e) {
      throw new HiveException("Unable to alter table.", e);
    } catch (TException e) {
@@ -249,7 +265,11 @@
        tbl.setFields(MetaStoreUtils.getFieldsFromDeserializer(tbl.getName(), tbl.getDeserializer()));
      }
      tbl.checkValidity();
-      getMSC().createTable(tbl.getTTable());
+      if (this.conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONTEST)) {
+        getSessionMSC().createTable(tbl.getTTable());
+      } else {
+        getMSC().createTable(tbl.getTTable());
+      }
    } catch (AlreadyExistsException e) {
      if (!ifNotExists) {
        throw new HiveException(e);
@@ -282,7 +302,19 @@
      boolean ignoreUnknownTab) throws HiveException {
 
    try {
-      getMSC().dropTable(dbName, tableName, deleteData, ignoreUnknownTab);
+      boolean tableDropped = false;
+      if (this.conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONTEST)) {
+        try {
+          getSessionMSC().dropTable(dbName, tableName, deleteData, ignoreUnknownTab);
+          tableDropped = true;
+        }
+        catch (NoSuchObjectException e) {
+          // Ignore if the table is not found
+        }
+      }
+
+      if (!tableDropped)
+        getMSC().dropTable(dbName, tableName, deleteData, ignoreUnknownTab);
    } catch (NoSuchObjectException e) {
      if (!ignoreUnknownTab) {
        throw new HiveException(e);
      }
@@ -328,7 +360,17 @@
    Table table = new Table();
    org.apache.hadoop.hive.metastore.api.Table tTable = null;
    try {
-      tTable = getMSC().getTable(dbName, tableName);
+      if (this.conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONTEST)) {
+        try {
+          tTable = getSessionMSC().getTable(dbName, tableName);
+        }
+        catch (NoSuchObjectException e) {
+          // Ignore any no such object exception
+        }
+      }
+
+      if (tTable == null)
+        tTable = getMSC().getTable(dbName, tableName);
    } catch (NoSuchObjectException e) {
      if(throwException) {
        LOG.error(StringUtils.stringifyException(e));
@@ -395,7 +437,19 @@
    */
  public List<String> getTablesByPattern(String tablePattern) throws HiveException {
    try {
-      return getMSC().getTables(MetaStoreUtils.DEFAULT_DATABASE_NAME, tablePattern);
+      List<String> retList = null;
+      if (this.conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONTEST)) {
+        retList = getSessionMSC().getTables(MetaStoreUtils.DEFAULT_DATABASE_NAME, tablePattern);
+      }
+
+      List<String> moreRetList = getMSC().getTables(MetaStoreUtils.DEFAULT_DATABASE_NAME, tablePattern);
+
+      if (retList != null)
+        retList.addAll(moreRetList);
+      else
+        retList = moreRetList;
+
+      return retList;
    } catch(Exception e) {
      throw new HiveException(e);
    }
@@ -421,7 +475,7 @@
    */
  protected boolean createDatabase(String name, String locationUri) throws AlreadyExistsException, MetaException, TException {
-    return getMSC().createDatabase(name, locationUri);
+    return getMSC().createDatabase(name, locationUri);
  }
 
  /**
@@ -544,7 +598,17 @@
    try {
      Partition tmpPart = new Partition(tbl, partSpec, location);
-      partition = getMSC().add_partition(tmpPart.getTPartition());
+      if (this.conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONTEST)) {
+        try {
+          partition = getSessionMSC().add_partition(tmpPart.getTPartition());
+        }
+        catch (InvalidObjectException e) {
+          // Ignore if the table cannot be located.
+        }
+      }
+
+      if (partition == null)
+        partition = getMSC().add_partition(tmpPart.getTPartition());
    } catch (Exception e) {
      LOG.error(StringUtils.stringifyException(e));
      throw new HiveException(e);
    }
@@ -576,13 +640,27 @@
    }
    org.apache.hadoop.hive.metastore.api.Partition tpart = null;
    try {
-      tpart = getMSC().getPartition(tbl.getDbName(), tbl.getName(), pvals);
-      if(tpart == null && forceCreate) {
-        LOG.debug("creating partition for table " + tbl.getName() + " with partition spec : " + partSpec);
-        tpart = getMSC().appendPartition(tbl.getDbName(), tbl.getName(), pvals);;
+      if (this.conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONTEST)) {
+        try {
+          tpart = getSessionMSC().getPartition(tbl.getDbName(), tbl.getName(), pvals);
+          if (tpart == null && forceCreate) {
+            LOG.debug("creating session level partition for table " + tbl.getName() + " with partition spec : " + partSpec);
+            tpart = getSessionMSC().appendPartition(tbl.getDbName(), tbl.getName(), pvals);
+          }
+        }
+        catch (InvalidObjectException e) {
+          // Ignore this exception
+        }
      }
-      if(tpart == null){
-        return null;
+      if (tpart == null) {
+        tpart = getMSC().getPartition(tbl.getDbName(), tbl.getName(), pvals);
+        if(tpart == null && forceCreate) {
+          LOG.debug("creating partition for table " + tbl.getName() + " with partition spec : " + partSpec);
+          tpart = getMSC().appendPartition(tbl.getDbName(), tbl.getName(), pvals);
+        }
+        if(tpart == null){
+          return null;
+        }
      }
    } catch (Exception e) {
      LOG.error(StringUtils.stringifyException(e));
@@ -594,6 +672,15 @@
  public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals, boolean deleteData) throws HiveException {
    try {
+      if (this.conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONTEST)) {
+        try {
+          return getSessionMSC().dropPartition(db_name, tbl_name, part_vals, deleteData);
+        }
+        catch (NoSuchObjectException e) {
+          // Ignore no such object exception
+        }
+      }
+
      return getMSC().dropPartition(db_name, tbl_name, part_vals, deleteData);
    } catch (NoSuchObjectException e) {
      throw new HiveException("Partition or table doesn't exist.", e);
@@ -603,9 +690,14 @@
  public List<String> getPartitionNames(String dbName, String tblName, short max) throws HiveException {
-    List names = null;
+    List<String> names = null;
    try {
-      names = getMSC().listPartitionNames(dbName, tblName, max);
+      if (this.conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONTEST)) {
+        names = getSessionMSC().listPartitionNames(dbName, tblName, max);
+      }
+      if (names == null) {
+        names = getMSC().listPartitionNames(dbName, tblName, max);
+      }
    } catch (Exception e) {
      LOG.error(StringUtils.stringifyException(e));
      throw new HiveException(e);
    }
@@ -621,9 +713,14 @@
    */
  public List<Partition> getPartitions(Table tbl) throws HiveException {
    if(tbl.isPartitioned()) {
-      List tParts;
+      List<org.apache.hadoop.hive.metastore.api.Partition> tParts = null;
      try {
-        tParts = getMSC().listPartitions(tbl.getDbName(), tbl.getName(), (short) -1);
+        if (this.conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONTEST)) {
+          tParts = getSessionMSC().listPartitions(tbl.getDbName(), tbl.getName(), (short) -1);
+        }
+        if (tParts == null) {
+          tParts = getMSC().listPartitions(tbl.getDbName(), tbl.getName(), (short) -1);
+        }
      } catch (Exception e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new HiveException(e);
      }
@@ -772,7 +869,7 @@
    * @throws HiveMetaException if a working client can't be created
    */
  private IMetaStoreClient createMetaStoreClient() throws MetaException {
-    return new HiveMetaStoreClient(this.conf);
+    return new HiveMetaStoreClient("hive metastore", this.conf);
  }
 
  /**
@@ -788,6 +885,28 @@
    return msc;
  }
 
+  private synchronized IMetaStoreClient getSessionMSC() throws MetaException {
+    if (sessionMSC == null) {
+      // Create an embedded derby and also set hive configuration variables so that
+      // a local metastore client is created that talks to this derby instance.
+
+      // Clone the configuration
+      HiveConf localConf = (HiveConf) conf.clone();
+
+      // set certain specific fields
+      localConf.set("javax.jdo.option.ConnectionURL", "jdbc:derby:memory:SessionDB;create=true");
+      localConf.set("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver");
+      localConf.set("javax.jdo.option.ConnectionUserName", "APP");
+      localConf.set("javax.jdo.option.ConnectionPassword", "mine");
+      localConf.set("hive.metastore.local", "true");
+      localConf.set("hive.metastore.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore");
+
+      sessionMSC = new HiveMetaStoreClient("hive session metastore", localConf);
+    }
+
+    return sessionMSC;
+  }
+
  public static List<FieldSchema> getFieldsFromDeserializer(String name, Deserializer serde) throws HiveException {
    try {
      return MetaStoreUtils.getFieldsFromDeserializer(name, serde);
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java	(revision 807442)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java	(working copy)
@@ -4504,6 +4504,15 @@
    // from which we want to find the reduce operator
    genMapRedTasks(qb);
 
+    if (conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONTEST) &&
+        !ctx.getExplain()) {
+      ArrayList<Task<? extends Serializable>> newRootTasks =
+        new ArrayList<Task<? extends Serializable>>();
+
+      ExplainSemanticAnalyzer.addExplainTask(ctx, this, newRootTasks,
+          true, conf, ast);
+      rootTasks = newRootTasks;
+    }
 
    LOG.info("Completed plan generation");
 
    return;
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java	(revision 807442)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java	(working copy)
@@ -36,6 +36,39 @@
    super(conf);
  }
 
+  /**
+   * Function to add an explain task. This is called from the
+   * ExplainSemanticAnalyzer and also from the SemanticAnalyzer in case
+   * hive.session.test is set to true.
+   *
+   * @param ctx The context
+   * @param sem The base semantic analyzer
+   * @param rTasks The root task list
+   * @param extended Whether extended explain is needed or not
+   * @param conf The Hive configuration
+   * @param ast The parse tree
+   */
+  public static void addExplainTask(Context ctx, BaseSemanticAnalyzer sem,
+      List<Task<? extends Serializable>> rTasks, boolean extended,
+      HiveConf conf, ASTNode ast) {
+
+    ctx.setResFile(new Path(ctx.getLocalTmpFileURI()));
+    List<Task<? extends Serializable>> tasks = sem.getRootTasks();
+    Task<? extends Serializable> fetchTask = sem.getFetchTask();
+    if (tasks == null) {
+      if (fetchTask != null) {
+        tasks = new ArrayList<Task<? extends Serializable>>();
+        tasks.add(fetchTask);
+      }
+    }
+    else if (fetchTask != null)
+      tasks.add(fetchTask);
+
+    rTasks.add(TaskFactory.get(new explainWork(ctx.getResFile(), tasks,
+        ((ASTNode)ast.getChild(0)).toStringTree(),
+        extended), conf));
+  }
+
  public void analyzeInternal(ASTNode ast) throws SemanticException {
    ctx.setExplain(true);
 
@@ -47,21 +80,8 @@
    if (ast.getChildCount() > 1) {
      extended = true;
    }
-
-    ctx.setResFile(new Path(ctx.getLocalTmpFileURI()));
-    List<Task<? extends Serializable>> tasks = sem.getRootTasks();
-    Task<? extends Serializable> fetchTask = sem.getFetchTask();
-    if (tasks == null) {
-      if (fetchTask != null) {
-        tasks = new ArrayList<Task<? extends Serializable>>();
-        tasks.add(fetchTask);
-      }
-    }
-    else if (fetchTask != null)
-      tasks.add(fetchTask);
-
-    rootTasks.add(TaskFactory.get(new explainWork(ctx.getResFile(), tasks,
-        ((ASTNode)ast.getChild(0)).toStringTree(),
-        extended), this.conf));
+
+    ExplainSemanticAnalyzer.addExplainTask(ctx, sem, rootTasks,
+        extended, conf, ast);
  }
 }
Index: ql/src/java/org/apache/hadoop/hive/ql/Driver.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/Driver.java	(revision 807442)
+++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java	(working copy)
@@ -436,7 +436,8 @@
  }
 
  public boolean getResults(Vector<String> res) throws IOException {
-    if (plan != null && plan.getPlan().getFetchTask() != null) {
+    if (plan != null && plan.getPlan().getFetchTask() != null &&
+        !conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONTEST)) {
      BaseSemanticAnalyzer sem = plan.getPlan();
      if (!sem.getFetchTaskInit()) {
        sem.setFetchTaskInit(true);
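For reviewers, here is a minimal sketch of how the pieces in this patch fit together end to end: the hive.session.test flag, HiveConf.clone(), and the new named HiveMetaStoreClient constructor. It uses only APIs the patch introduces or touches; the class name SessionMetaStoreExample, the client name, and the Derby database name are illustrative assumptions, not code from the patch.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class SessionMetaStoreExample {
  public static void main(String[] args) throws MetaException {
    // Base configuration for the process (mirrors TestHiveMetaStore.setUp()).
    HiveConf conf = new HiveConf(SessionMetaStoreExample.class);

    // Turn on session-test mode; Hive.java then tries the session-level
    // metastore (embedded in-memory Derby) before falling back to the shared one.
    conf.set("hive.session.test", "true");

    // clone() (added above) copies every property into a fresh HiveConf, which
    // getSessionMSC() relies on so its Derby settings do not leak back into the
    // caller's configuration. ExampleDB is a hypothetical database name.
    HiveConf localConf = (HiveConf) conf.clone();
    localConf.set("javax.jdo.option.ConnectionURL", "jdbc:derby:memory:ExampleDB;create=true");
    localConf.set("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver");
    localConf.set("hive.metastore.local", "true");

    // The constructor now takes a client name. With a local metastore the name
    // becomes the HMSHandler name, which keys the per-thread RawStore map, so
    // differently named handlers on one thread get independent raw stores.
    HiveMetaStoreClient client = new HiveMetaStoreClient("example session client", localConf);
    client.close();
  }
}

One design consequence worth noting: sessionMSC is a static field on Hive, so every Hive instance in the JVM shares the single in-memory SessionDB until the process exits, and getPartitionNames/getPartitions consult only the session store once it returns a non-null result.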