Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
===================================================================
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (revision 1096599)
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (working copy)
@@ -430,6 +430,7 @@
// deployment. It has not been documented in hive-default.xml intentionally, this should be removed
// once the feature is stable
HIVE_MAPPER_CANNOT_SPAN_MULTIPLE_PARTITIONS("hive.mapper.cannot.span.multiple.partitions", false),
+ HIVE_CONCATENATE_CHECK_INDEX ("hive.exec.concatenate.check.index", true),
;
Index: conf/hive-default.xml
===================================================================
--- conf/hive-default.xml (revision 1096599)
+++ conf/hive-default.xml (working copy)
@@ -1038,4 +1038,12 @@
Insert queries are not restricted by this limit.
+
+ hive.exec.concatenate.check.index
+ true
+ If this is set to true, Hive will throw an error when doing
+ 'alter table tbl_name [partSpec] concatenate' on a table/partition
+ that has indexes on it.
+
+
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (revision 1096599)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (working copy)
@@ -403,6 +403,7 @@
BlockMergeTask taskExec = new BlockMergeTask();
taskExec.initialize(db.getConf(), null, driverCxt);
taskExec.setWork(mergeWork);
+ taskExec.setQueryPlan(this.getQueryPlan());
int ret = taskExec.execute(driverCxt);
return ret;
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java (revision 1096599)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java (working copy)
@@ -486,4 +486,12 @@
public void setRetryCmdWhenFail(boolean retryCmdWhenFail) {
this.retryCmdWhenFail = retryCmdWhenFail;
}
+
+ public QueryPlan getQueryPlan() {
+ return queryPlan;
+ }
+
+ public void setQueryPlan(QueryPlan queryPlan) {
+ this.queryPlan = queryPlan;
+ }
}
Index: ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java (revision 1096599)
+++ ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java (working copy)
@@ -111,6 +111,9 @@
if(work.getNumMapTasks() != null) {
job.setNumMapTasks(work.getNumMapTasks());
}
+
+ // The block merge is a map-only job: force zero reducers.
+ job.setNumReduceTasks(0);
if (work.getMinSplitSize() != null) {
HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZE, work
@@ -146,11 +149,18 @@
RunningJob rj = null;
boolean noName = StringUtils.isEmpty(HiveConf.getVar(job,
HiveConf.ConfVars.HADOOPJOBNAME));
-
+
+ String jobName = null;
+ if (noName && this.getQueryPlan() != null) {
+ int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
+ jobName = Utilities.abbreviate(this.getQueryPlan().getQueryStr(),
+ maxlen - 6);
+ }
+
if (noName) {
// This is for a special case to ensure unit tests pass
- HiveConf.setVar(job, HiveConf.ConfVars.HADOOPJOBNAME, "JOB"
- + Utilities.randGen.nextInt());
+ HiveConf.setVar(job, HiveConf.ConfVars.HADOOPJOBNAME,
+ jobName != null ? jobName : "JOB" + Utilities.randGen.nextInt());
}
try {
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (revision 1096599)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (working copy)
@@ -25,6 +25,8 @@
import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_SHOWDATABASES;
import java.io.Serializable;
+import java.net.URI;
+import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
@@ -83,7 +85,9 @@
import org.apache.hadoop.hive.ql.plan.FetchWork;
import org.apache.hadoop.hive.ql.plan.GrantDesc;
import org.apache.hadoop.hive.ql.plan.GrantRevokeRoleDDL;
+import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
import org.apache.hadoop.hive.ql.plan.LockTableDesc;
+import org.apache.hadoop.hive.ql.plan.MoveWork;
import org.apache.hadoop.hive.ql.plan.MsckDesc;
import org.apache.hadoop.hive.ql.plan.PrincipalDesc;
import org.apache.hadoop.hive.ql.plan.PrivilegeDesc;
@@ -1116,19 +1120,24 @@
tableName, partSpec);
List inputDir = new ArrayList();
- String outputDir = null;
+ String tblPartLoc = null;
+ Table tblObj = null;
try {
- Table tblObj = db.getTable(tableName);
+ tblObj = db.getTable(tableName);
List bucketCols = null;
Class extends InputFormat> inputFormatClass = null;
boolean isArchived = false;
- List indexes = db.getIndexes(tblObj.getDbName(), tableName,
- Short.MAX_VALUE);
- if (indexes != null && indexes.size() > 0) {
- throw new SemanticException("can not do merge because source table "
- + tableName + " is indexed.");
+ boolean checkIndex = HiveConf.getBoolVar(conf,
+ HiveConf.ConfVars.HIVE_CONCATENATE_CHECK_INDEX);
+ if(checkIndex) {
+ List indexes = db.getIndexes(tblObj.getDbName(), tableName,
+ Short.MAX_VALUE);
+ if (indexes != null && indexes.size() > 0) {
+ throw new SemanticException("can not do merge because source table "
+ + tableName + " is indexed.");
+ }
}
if (tblObj.isPartitioned()) {
@@ -1144,12 +1153,12 @@
bucketCols = part.getBucketCols();
inputFormatClass = part.getInputFormatClass();
isArchived = Utilities.isArchived(part);
- outputDir = part.getDataLocation().toString();
+ tblPartLoc = part.getDataLocation().toString();
}
} else {
inputFormatClass = tblObj.getInputFormatClass();
bucketCols = tblObj.getBucketCols();
- outputDir = tblObj.getDataLocation().toString();
+ tblPartLoc = tblObj.getDataLocation().toString();
}
// throw a HiveException for non-rcfile.
@@ -1169,29 +1178,34 @@
throw new SemanticException(
"Merge can not perform on archived partitions.");
}
- } catch (HiveException e) {
- throw new SemanticException(e);
- }
-
- // input and output are the same
- inputDir.add(outputDir);
-
- mergeDesc.setInputDir(inputDir);
- mergeDesc.setOutputDir(outputDir);
- addInputsOutputsAlterTable(tableName, partSpec);
+ // input and output are the same
+ inputDir.add(tblPartLoc);
- DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), mergeDesc);
- ddlWork.setNeedLock(true);
- Task extends Serializable> mergeTask = TaskFactory.get(ddlWork, conf);
+ mergeDesc.setInputDir(inputDir);
- tableSpec tablepart = new tableSpec(this.db, conf, tablePartAST);
- StatsWork statDesc = new StatsWork(tablepart);
- statDesc.setNoStatsAggregator(true);
- Task extends Serializable> statTask = TaskFactory.get(statDesc, conf);
- mergeTask.addDependentTask(statTask);
+ addInputsOutputsAlterTable(tableName, partSpec);
+ DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), mergeDesc);
+ ddlWork.setNeedLock(true);
+ Task extends Serializable> mergeTask = TaskFactory.get(ddlWork, conf);
+ TableDesc tblDesc = Utilities.getTableDesc(tblObj);
+ String queryTmpdir = ctx.getExternalTmpFileURI(new URI(tblPartLoc));
+ mergeDesc.setOutputDir(queryTmpdir);
+ LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, queryTmpdir, tblDesc,
+ partSpec == null ? new HashMap() : partSpec);
+ Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false),
+ conf);
+ mergeTask.addDependentTask(moveTsk);
+ tableSpec tablepart = new tableSpec(this.db, conf, tablePartAST);
+ StatsWork statDesc = new StatsWork(tablepart);
+ statDesc.setNoStatsAggregator(true);
+ Task extends Serializable> statTask = TaskFactory.get(statDesc, conf);
+ moveTsk.addDependentTask(statTask);
- rootTasks.add(mergeTask);
+ rootTasks.add(mergeTask);
+ } catch (Exception e) {
+ throw new SemanticException(e);
+ }
}
private void analyzeAlterTableClusterSort(ASTNode ast)
Index: ql/src/test/queries/clientnegative/alter_merge_index.q
===================================================================
--- ql/src/test/queries/clientnegative/alter_merge_index.q (revision 0)
+++ ql/src/test/queries/clientnegative/alter_merge_index.q (revision 0)
@@ -0,0 +1,15 @@
+create table src_rc_merge_test(key int, value string) stored as rcfile;
+
+load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test;
+load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test;
+load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test;
+
+show table extended like `src_rc_merge_test`;
+
+select count(1) from src_rc_merge_test;
+select sum(hash(key)), sum(hash(value)) from src_rc_merge_test;
+
+create index src_rc_merge_test_index on table src_rc_merge_test(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2");
+show indexes on src_rc_merge_test;
+
+alter table src_rc_merge_test concatenate;
Index: ql/src/test/queries/clientpositive/alter_merge_index.q
===================================================================
--- ql/src/test/queries/clientpositive/alter_merge_index.q (revision 0)
+++ ql/src/test/queries/clientpositive/alter_merge_index.q (revision 0)
@@ -0,0 +1,51 @@
+set hive.exec.concatenate.check.index =false;
+create table src_rc_merge_test(key int, value string) stored as rcfile;
+
+load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test;
+load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test;
+load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test;
+
+show table extended like `src_rc_merge_test`;
+
+select count(1) from src_rc_merge_test;
+select sum(hash(key)), sum(hash(value)) from src_rc_merge_test;
+
+create index src_rc_merge_test_index on table src_rc_merge_test(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2");
+show indexes on src_rc_merge_test;
+
+alter table src_rc_merge_test concatenate;
+
+show table extended like `src_rc_merge_test`;
+
+select count(1) from src_rc_merge_test;
+select sum(hash(key)), sum(hash(value)) from src_rc_merge_test;
+
+drop index src_rc_merge_test_index on src_rc_merge_test;
+
+create table src_rc_merge_test_part(key int, value string) partitioned by (ds string) stored as rcfile;
+
+alter table src_rc_merge_test_part add partition (ds='2011');
+
+load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test_part partition (ds='2011');
+load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test_part partition (ds='2011');
+load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test_part partition (ds='2011');
+
+show table extended like `src_rc_merge_test_part` partition (ds='2011');
+
+select count(1) from src_rc_merge_test_part;
+select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part;
+
+create index src_rc_merge_test_part_index on table src_rc_merge_test_part(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2");
+show indexes on src_rc_merge_test_part;
+
+alter table src_rc_merge_test_part partition (ds='2011') concatenate;
+
+show table extended like `src_rc_merge_test_part` partition (ds='2011');
+
+select count(1) from src_rc_merge_test_part;
+select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part;
+
+drop index src_rc_merge_test_part_index on src_rc_merge_test_part;
+
+drop table src_rc_merge_test;
+drop table src_rc_merge_test_part;
Index: ql/src/test/results/clientnegative/alter_merge_index.q.out
===================================================================
--- ql/src/test/results/clientnegative/alter_merge_index.q.out (revision 0)
+++ ql/src/test/results/clientnegative/alter_merge_index.q.out (revision 0)
@@ -0,0 +1,70 @@
+PREHOOK: query: create table src_rc_merge_test(key int, value string) stored as rcfile
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table src_rc_merge_test(key int, value string) stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@src_rc_merge_test
+PREHOOK: query: load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test
+PREHOOK: type: LOAD
+PREHOOK: Output: default@src_rc_merge_test
+POSTHOOK: query: load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@src_rc_merge_test
+PREHOOK: query: load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test
+PREHOOK: type: LOAD
+PREHOOK: Output: default@src_rc_merge_test
+POSTHOOK: query: load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@src_rc_merge_test
+PREHOOK: query: load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test
+PREHOOK: type: LOAD
+PREHOOK: Output: default@src_rc_merge_test
+POSTHOOK: query: load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@src_rc_merge_test
+PREHOOK: query: show table extended like `src_rc_merge_test`
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_rc_merge_test`
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_rc_merge_test
+owner:heyongqiang
+location:pfile:/Users/heyongqiang/Documents/workspace/Hive-3/build/ql/test/data/warehouse/src_rc_merge_test
+inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:false
+partitionColumns:
+totalNumberFiles:3
+totalFileSize:636
+maxFileSize:222
+minFileSize:206
+lastAccessTime:0
+lastUpdateTime:1303771235000
+
+PREHOOK: query: select count(1) from src_rc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_merge_test
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-40-35_954_7245125799355678250/-mr-10000
+POSTHOOK: query: select count(1) from src_rc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_merge_test
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-40-35_954_7245125799355678250/-mr-10000
+15
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_merge_test
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-40-44_254_8986394369265464050/-mr-10000
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_merge_test
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-40-44_254_8986394369265464050/-mr-10000
+214 -7678496319
+PREHOOK: query: create index src_rc_merge_test_index on table src_rc_merge_test(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
+PREHOOK: type: CREATEINDEX
+POSTHOOK: query: create index src_rc_merge_test_index on table src_rc_merge_test(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
+POSTHOOK: type: CREATEINDEX
+PREHOOK: query: show indexes on src_rc_merge_test
+PREHOOK: type: SHOWINDEXES
+POSTHOOK: query: show indexes on src_rc_merge_test
+POSTHOOK: type: SHOWINDEXES
+src_rc_merge_test_index src_rc_merge_test key default__src_rc_merge_test_src_rc_merge_test_index__ compact
+FAILED: Error in semantic analysis: org.apache.hadoop.hive.ql.parse.SemanticException: can not do merge because source table src_rc_merge_test is indexed.
Index: ql/src/test/results/clientpositive/alter_merge_index.q.out
===================================================================
--- ql/src/test/results/clientpositive/alter_merge_index.q.out (revision 0)
+++ ql/src/test/results/clientpositive/alter_merge_index.q.out (revision 0)
@@ -0,0 +1,259 @@
+PREHOOK: query: create table src_rc_merge_test(key int, value string) stored as rcfile
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table src_rc_merge_test(key int, value string) stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@src_rc_merge_test
+PREHOOK: query: load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test
+PREHOOK: type: LOAD
+PREHOOK: Output: default@src_rc_merge_test
+POSTHOOK: query: load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@src_rc_merge_test
+PREHOOK: query: load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test
+PREHOOK: type: LOAD
+PREHOOK: Output: default@src_rc_merge_test
+POSTHOOK: query: load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@src_rc_merge_test
+PREHOOK: query: load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test
+PREHOOK: type: LOAD
+PREHOOK: Output: default@src_rc_merge_test
+POSTHOOK: query: load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@src_rc_merge_test
+PREHOOK: query: show table extended like `src_rc_merge_test`
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_rc_merge_test`
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_rc_merge_test
+owner:heyongqiang
+location:pfile:/Users/heyongqiang/Documents/workspace/Hive-3/build/ql/test/data/warehouse/src_rc_merge_test
+inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:false
+partitionColumns:
+totalNumberFiles:3
+totalFileSize:636
+maxFileSize:222
+minFileSize:206
+lastAccessTime:0
+lastUpdateTime:1303771056000
+
+PREHOOK: query: select count(1) from src_rc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_merge_test
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-37-37_102_8950921835330321013/-mr-10000
+POSTHOOK: query: select count(1) from src_rc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_merge_test
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-37-37_102_8950921835330321013/-mr-10000
+15
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_merge_test
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-37-44_428_4310208755558136304/-mr-10000
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_merge_test
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-37-44_428_4310208755558136304/-mr-10000
+214 -7678496319
+PREHOOK: query: create index src_rc_merge_test_index on table src_rc_merge_test(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
+PREHOOK: type: CREATEINDEX
+POSTHOOK: query: create index src_rc_merge_test_index on table src_rc_merge_test(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
+POSTHOOK: type: CREATEINDEX
+PREHOOK: query: show indexes on src_rc_merge_test
+PREHOOK: type: SHOWINDEXES
+POSTHOOK: query: show indexes on src_rc_merge_test
+POSTHOOK: type: SHOWINDEXES
+src_rc_merge_test_index src_rc_merge_test key default__src_rc_merge_test_src_rc_merge_test_index__ compact
+PREHOOK: query: alter table src_rc_merge_test concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@src_rc_merge_test
+PREHOOK: Output: default@src_rc_merge_test
+POSTHOOK: query: alter table src_rc_merge_test concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@src_rc_merge_test
+POSTHOOK: Output: default@src_rc_merge_test
+PREHOOK: query: show table extended like `src_rc_merge_test`
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_rc_merge_test`
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_rc_merge_test
+owner:heyongqiang
+location:pfile:/Users/heyongqiang/Documents/workspace/Hive-3/build/ql/test/data/warehouse/src_rc_merge_test
+inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:false
+partitionColumns:
+totalNumberFiles:1
+totalFileSize:334
+maxFileSize:334
+minFileSize:334
+lastAccessTime:0
+lastUpdateTime:1303771077000
+
+PREHOOK: query: select count(1) from src_rc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_merge_test
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-37-58_256_2454422456297339159/-mr-10000
+POSTHOOK: query: select count(1) from src_rc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_merge_test
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-37-58_256_2454422456297339159/-mr-10000
+15
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_merge_test
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-38-05_416_6309963826290374662/-mr-10000
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_merge_test
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-38-05_416_6309963826290374662/-mr-10000
+214 -7678496319
+PREHOOK: query: drop index src_rc_merge_test_index on src_rc_merge_test
+PREHOOK: type: DROPINDEX
+POSTHOOK: query: drop index src_rc_merge_test_index on src_rc_merge_test
+POSTHOOK: type: DROPINDEX
+PREHOOK: query: create table src_rc_merge_test_part(key int, value string) partitioned by (ds string) stored as rcfile
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table src_rc_merge_test_part(key int, value string) partitioned by (ds string) stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@src_rc_merge_test_part
+PREHOOK: query: alter table src_rc_merge_test_part add partition (ds='2011')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Input: default@src_rc_merge_test_part
+POSTHOOK: query: alter table src_rc_merge_test_part add partition (ds='2011')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Input: default@src_rc_merge_test_part
+POSTHOOK: Output: default@src_rc_merge_test_part@ds=2011
+PREHOOK: query: load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test_part partition (ds='2011')
+PREHOOK: type: LOAD
+PREHOOK: Output: default@src_rc_merge_test_part@ds=2011
+POSTHOOK: query: load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test_part partition (ds='2011')
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@src_rc_merge_test_part@ds=2011
+PREHOOK: query: load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test_part partition (ds='2011')
+PREHOOK: type: LOAD
+PREHOOK: Output: default@src_rc_merge_test_part@ds=2011
+POSTHOOK: query: load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test_part partition (ds='2011')
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@src_rc_merge_test_part@ds=2011
+PREHOOK: query: load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test_part partition (ds='2011')
+PREHOOK: type: LOAD
+PREHOOK: Output: default@src_rc_merge_test_part@ds=2011
+POSTHOOK: query: load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test_part partition (ds='2011')
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@src_rc_merge_test_part@ds=2011
+PREHOOK: query: show table extended like `src_rc_merge_test_part` partition (ds='2011')
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_rc_merge_test_part` partition (ds='2011')
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_rc_merge_test_part
+owner:heyongqiang
+location:pfile:/Users/heyongqiang/Documents/workspace/Hive-3/build/ql/test/data/warehouse/src_rc_merge_test_part/ds=2011
+inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:true
+partitionColumns:struct partition_columns { string ds}
+totalNumberFiles:3
+totalFileSize:636
+maxFileSize:222
+minFileSize:206
+lastAccessTime:0
+lastUpdateTime:1303771098000
+
+PREHOOK: query: select count(1) from src_rc_merge_test_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_merge_test_part@ds=2011
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-38-18_800_4503332771009893828/-mr-10000
+POSTHOOK: query: select count(1) from src_rc_merge_test_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_merge_test_part@ds=2011
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-38-18_800_4503332771009893828/-mr-10000
+15
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_merge_test_part@ds=2011
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-38-26_650_4569478024948505765/-mr-10000
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_merge_test_part@ds=2011
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-38-26_650_4569478024948505765/-mr-10000
+214 -7678496319
+PREHOOK: query: create index src_rc_merge_test_part_index on table src_rc_merge_test_part(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
+PREHOOK: type: CREATEINDEX
+POSTHOOK: query: create index src_rc_merge_test_part_index on table src_rc_merge_test_part(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
+POSTHOOK: type: CREATEINDEX
+PREHOOK: query: show indexes on src_rc_merge_test_part
+PREHOOK: type: SHOWINDEXES
+POSTHOOK: query: show indexes on src_rc_merge_test_part
+POSTHOOK: type: SHOWINDEXES
+src_rc_merge_test_part_index src_rc_merge_test_part key default__src_rc_merge_test_part_src_rc_merge_test_part_index__ compact
+PREHOOK: query: alter table src_rc_merge_test_part partition (ds='2011') concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@src_rc_merge_test_part
+PREHOOK: Output: default@src_rc_merge_test_part@ds=2011
+POSTHOOK: query: alter table src_rc_merge_test_part partition (ds='2011') concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@src_rc_merge_test_part
+POSTHOOK: Output: default@src_rc_merge_test_part@ds=2011
+PREHOOK: query: show table extended like `src_rc_merge_test_part` partition (ds='2011')
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_rc_merge_test_part` partition (ds='2011')
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_rc_merge_test_part
+owner:heyongqiang
+location:pfile:/Users/heyongqiang/Documents/workspace/Hive-3/build/ql/test/data/warehouse/src_rc_merge_test_part/ds=2011
+inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:true
+partitionColumns:struct partition_columns { string ds}
+totalNumberFiles:1
+totalFileSize:334
+maxFileSize:334
+minFileSize:334
+lastAccessTime:0
+lastUpdateTime:1303771116000
+
+PREHOOK: query: select count(1) from src_rc_merge_test_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_merge_test_part@ds=2011
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-38-37_225_2085086948696537861/-mr-10000
+POSTHOOK: query: select count(1) from src_rc_merge_test_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_merge_test_part@ds=2011
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-38-37_225_2085086948696537861/-mr-10000
+15
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_merge_test_part@ds=2011
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-38-44_345_8409871309064325907/-mr-10000
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_merge_test_part@ds=2011
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-25_15-38-44_345_8409871309064325907/-mr-10000
+214 -7678496319
+PREHOOK: query: drop index src_rc_merge_test_part_index on src_rc_merge_test_part
+PREHOOK: type: DROPINDEX
+POSTHOOK: query: drop index src_rc_merge_test_part_index on src_rc_merge_test_part
+POSTHOOK: type: DROPINDEX
+PREHOOK: query: drop table src_rc_merge_test
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@src_rc_merge_test
+PREHOOK: Output: default@src_rc_merge_test
+POSTHOOK: query: drop table src_rc_merge_test
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@src_rc_merge_test
+POSTHOOK: Output: default@src_rc_merge_test
+PREHOOK: query: drop table src_rc_merge_test_part
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@src_rc_merge_test_part
+PREHOOK: Output: default@src_rc_merge_test_part
+POSTHOOK: query: drop table src_rc_merge_test_part
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@src_rc_merge_test_part
+POSTHOOK: Output: default@src_rc_merge_test_part