diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 541af57..cba6b61 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -1519,7 +1519,9 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal // Statistics HIVESTATSAUTOGATHER("hive.stats.autogather", true, - "A flag to gather statistics automatically during the INSERT OVERWRITE command."), + "A flag to gather basic statistics (but not column statistics) automatically during the INSERT OVERWRITE command."), + HIVESTATSCOLAUTOGATHER("hive.stats.column.autogather", false, + "A flag to gather column statistics automatically."), HIVESTATSDBCLASS("hive.stats.dbclass", "fs", new PatternSet("custom", "fs"), "The storage that stores temporary Hive statistics. In filesystem based statistics collection ('fs'), \n" + "each task writes statistics it has collected in a file on the filesystem, which will be aggregated \n" + diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties index c891d40..2138f33 100644 --- a/itests/src/test/resources/testconfiguration.properties +++ b/itests/src/test/resources/testconfiguration.properties @@ -75,6 +75,7 @@ minitez.query.files.shared=acid_globallimit.q,\ alter_merge_2_orc.q,\ alter_merge_orc.q,\ alter_merge_stats_orc.q,\ + autoColumnStats_2.q,\ auto_join0.q,\ auto_join1.q,\ bucket2.q,\ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java index d8ac6ae..651b559 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java @@ -620,7 +620,7 @@ private boolean needConversion(PartitionDesc partitionDesc) { } // if table and all partitions have the same schema and serde, no need to convert - private boolean needConversion(TableDesc tableDesc, List partDescs) { + public static boolean needConversion(TableDesc tableDesc, List partDescs) { Class tableSerDe = tableDesc.getDeserializerClass(); SerDeSpec spec = AnnotationUtils.getAnnotation(tableSerDe, SerDeSpec.class); if (null == spec) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java index d213731..5497cb2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java @@ -688,7 +688,7 @@ private int getPrecisionForType(PrimitiveTypeInfo typeInfo) { return HiveDecimalUtils.getPrecisionForType(typeInfo); } - private GenericUDF getGenericUDFForCast(TypeInfo castType) throws HiveException { + public static GenericUDF getGenericUDFForCast(TypeInfo castType) throws HiveException { UDF udfClass = null; GenericUDF genericUdf = null; switch (((PrimitiveTypeInfo) castType).getPrimitiveCategory()) {
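A hedged sketch of the call pattern this visibility change enables; it mirrors how ColumnStatsAutoGatherContext, added later in this patch, wraps partition values in casts. The helper name castTo and its enclosing class are hypothetical, not part of the patch:

  // Wrap expr in a cast to destType when the types differ, using the
  // now-public VectorizationContext.getGenericUDFForCast helper.
  static ExprNodeDesc castTo(TypeInfo destType, ExprNodeDesc expr) throws HiveException {
    if (expr.getTypeInfo().equals(destType)) {
      return expr; // types already match, no cast needed
    }
    GenericUDF castUdf = VectorizationContext.getGenericUDFForCast(destType);
    return new ExprNodeGenericFuncDesc(destType, castUdf,
        Arrays.asList(new ExprNodeDesc[] { expr }));
  }

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java index 9fbbd4c..d9f9851 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hive.ql.exec.Task; import 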
org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask; import org.apache.hadoop.hive.ql.io.HiveInputFormat; import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils; import org.apache.hadoop.hive.ql.parse.ParseContext; @@ -117,13 +118,23 @@ public static void processSkewJoin(JoinOperator joinOp, } List> children = currTask.getChildTasks(); - if (children != null && children.size() > 1) { - throw new SemanticException("Should not happened"); + // The current task may now have several children; locate the single + // MapredLocalTask expected among them. + Task child = null; + if (children == null || children.size() == 0) { + throw new SemanticException("Expecting only one MapredLocalTask, but found none"); + } else { + for (Task t : children) { + if (t instanceof MapredLocalTask) { + if (child != null) { + throw new SemanticException( + "Expecting only one MapredLocalTask, but found more than one"); + } else { + child = t; + } + } + } } - Task child = - children != null && children.size() == 1 ? children.get(0) : null; - Path baseTmpDir = parseCtx.getContext().getMRTmpPath(); JoinDesc joinDescriptor = joinOp.getConf(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java index 02c5a89..56205de 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java @@ -446,7 +446,7 @@ private static PrunedPartitionList getPartitionsFromServer(Table tab, } } - private static Set getAllPartitions(Table tab) throws HiveException { + public static Set getAllPartitions(Table tab) throws HiveException { PerfLogger perfLogger = SessionState.getPerfLogger(); perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING); Set result = Hive.get().getAllPartitionsOf(tab); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsAutoGatherContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsAutoGatherContext.java new file mode 100644 index 0000000..4ac2278 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsAutoGatherContext.java @@ -0,0 +1,287 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.parse; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.exec.ColumnInfo; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.RowSchema; +import org.apache.hadoop.hive.ql.exec.SelectOperator; +import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.optimizer.ConstantPropagate; +import org.apache.hadoop.hive.ql.optimizer.Optimizer; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.AnalyzeRewriteContext; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.plan.LoadFileDesc; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.ql.plan.SelectDesc; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; + +/** + * ColumnStatsAutoGatherContext: passed to the compiler when hive.stats.column.autogather + * is set to true, so that column statistics are gathered automatically as part of an + * INSERT OVERWRITE or INSERT INTO command. + * + **/ + +public class ColumnStatsAutoGatherContext { + + public AnalyzeRewriteContext analyzeRewrite; + private final List loadFileWork = new ArrayList<>(); + private final SemanticAnalyzer sa; + private final HiveConf conf; + private final Operator op; + private final List columns; + private final List partitionColumns; + private boolean isInsertInto; + private Table tbl; + private Map partSpec; + + public ColumnStatsAutoGatherContext(SemanticAnalyzer sa, HiveConf conf, + Operator op, Table tbl, Map partSpec, + boolean isInsertInto) throws SemanticException { + super(); + this.sa = sa; + this.conf = conf; + this.op = op; + this.tbl = tbl; + this.partSpec = partSpec; + this.isInsertInto = isInsertInto; + columns = tbl.getCols(); + partitionColumns = tbl.getPartCols(); + } + + public List getLoadFileWork() { + return loadFileWork; + } + + public AnalyzeRewriteContext getAnalyzeRewrite() { + return analyzeRewrite; + } + + public void setAnalyzeRewrite(AnalyzeRewriteContext analyzeRewrite) { + this.analyzeRewrite = analyzeRewrite; + } + + public void insertAnalyzePipeline() throws SemanticException { + // 1. Generate the statement of analyze table [tablename] compute statistics for columns + String analyzeCommand = "analyze table `" + tbl.getDbName() + "`.`" + tbl.getTableName() + "`" + + " compute statistics for columns ";
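+ // For example (hypothetical target table default.t), the command built here is + // "analyze table `default`.`t` compute statistics for columns". + + // 2.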
+ // In the non-partitioned table case, it generates a TS-SEL-GBY-RS-GBY-SEL-FS operator pipeline + // In the static-partitioned table case, it generates a TS-FIL(partitionKey)-SEL-GBY(partitionKey)-RS-GBY-SEL-FS operator pipeline + // In the dynamic-partitioned table case, it generates a TS-SEL-GBY(partitionKey)-RS-GBY-SEL-FS operator pipeline + Operator selOp = null; + try { + selOp = genSelOpForAnalyze(analyzeCommand); + } catch (IOException | ParseException e) { + throw new SemanticException(e); + } + + // 3. attach this SEL to the operator right before FS + op.getChildOperators().add(selOp); + selOp.getParentOperators().clear(); + selOp.getParentOperators().add(op); + + // 4. populate the colExprMap, colList, etc. for the SEL + try { + replaceSelectOperatorProcess((SelectOperator) selOp, op); + } catch (HiveException e) { + throw new SemanticException(e); + } + } + + @SuppressWarnings("rawtypes") + private Operator genSelOpForAnalyze(String analyzeCommand) throws IOException, ParseException, SemanticException { + // 0. initialization + Context ctx = new Context(conf); + ParseDriver pd = new ParseDriver(); + ASTNode tree = pd.parse(analyzeCommand, ctx); + tree = ParseUtils.findRootNonNullToken(tree); + + // 1. get the ColumnStatsSemanticAnalyzer + BaseSemanticAnalyzer baseSem = SemanticAnalyzerFactory.get(new QueryState(conf), tree); + ColumnStatsSemanticAnalyzer colSem = (ColumnStatsSemanticAnalyzer) baseSem; + + // 2. get the rewritten AST + ASTNode ast = colSem.getRewriteASTOnly(tree, this); + baseSem = SemanticAnalyzerFactory.get(new QueryState(conf), ast); + SemanticAnalyzer sem = (SemanticAnalyzer) baseSem; + QB qb = new QB(null, null, false); + ASTNode child = ast; + ParseContext subPCtx = ((SemanticAnalyzer) sem).getParseContext(); + subPCtx.setContext(ctx); + ((SemanticAnalyzer) sem).initParseCtx(subPCtx); + sem.doPhase1(child, qb, sem.initPhase1Ctx(), null); + sem.getMetaData(qb); + Operator operator = sem.genPlan(qb); + + // 3. populate the load file work so that ColumnStatsTask can work + loadFileWork.addAll(sem.getLoadFileWork()); + + // 4. Because the analyze statement has exactly one TS operator, we can simply take it. + operator = sem.topOps.values().iterator().next(); + + // 5. get the first SEL after the TS + while (!(operator instanceof SelectOperator)) { + operator = operator.getChildOperators().get(0); + } + return operator; + } + + /** + * @param operator : the select operator in the analyze statement + * @param input : the operator right before FS in the insert overwrite statement + * @throws HiveException + */ + private void replaceSelectOperatorProcess(SelectOperator operator, Operator input) + throws HiveException { + RowSchema selRS = operator.getSchema(); + ArrayList signature = new ArrayList<>(); + OpParseContext inputCtx = sa.opParseCtx.get(input); + RowResolver inputRR = inputCtx.getRowResolver(); + ArrayList columns = inputRR.getColumnInfos(); + ArrayList colList = new ArrayList(); + ArrayList columnNames = new ArrayList(); + Map columnExprMap = + new HashMap(); + // the column positions in the operator should be like this + // <----non-partition columns---->|<--static partition columns-->|<--dynamic partition columns--> + // ExprNodeColumnDesc | ExprNodeConstantDesc | ExprNodeColumnDesc + // from input | generate itself | from input + // |
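+ // Worked example (hypothetical query): for "INSERT OVERWRITE TABLE t PARTITION (ds='2010-03-03', hr) SELECT key, value, hr FROM src", colList becomes [column(key), column(value), constant('2010-03-03') wrapped in a cast when its type differs from the destination type, column(hr)]. + + // 1.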
deal with non-partition columns + for (int i = 0; i < this.columns.size(); i++) { + ColumnInfo col = columns.get(i); + ExprNodeDesc exprNodeDesc = new ExprNodeColumnDesc(col); + colList.add(exprNodeDesc); + String internalName = selRS.getColumnNames().get(i); + columnNames.add(internalName); + columnExprMap.put(internalName, exprNodeDesc); + signature.add(selRS.getSignature().get(i)); + } + // if there is any partition column (in static partition or dynamic + // partition or mixed case) + int dynamicPartBegin = -1; + for (int i = 0; i < partitionColumns.size(); i++) { + ExprNodeDesc exprNodeDesc = null; + String partColName = partitionColumns.get(i).getName(); + // 2. deal with static partition columns + if (partSpec != null && partSpec.containsKey(partColName) + && partSpec.get(partColName) != null) { + if (dynamicPartBegin > 0) { + throw new SemanticException( + "Dynamic partition columns should not come before static partition columns."); + } + exprNodeDesc = new ExprNodeConstantDesc(partSpec.get(partColName)); + TypeInfo srcType = exprNodeDesc.getTypeInfo(); + TypeInfo destType = selRS.getSignature().get(this.columns.size() + i).getType(); + if (!srcType.equals(destType)) { + // TODO: maybe we should use ParseUtils.createConversionCast(); + GenericUDF castUdf = VectorizationContext.getGenericUDFForCast(destType); + exprNodeDesc = new ExprNodeGenericFuncDesc(destType, castUdf, + Arrays.asList(new ExprNodeDesc[] { exprNodeDesc })); + } + } + // 3. dynamic partition columns + else { + dynamicPartBegin++; + ColumnInfo col = columns.get(this.columns.size() + dynamicPartBegin); + TypeInfo srcType = col.getType(); + TypeInfo destType = selRS.getSignature().get(this.columns.size() + i).getType(); + exprNodeDesc = new ExprNodeColumnDesc(col); + if (!srcType.equals(destType)) { + GenericUDF castUdf = VectorizationContext.getGenericUDFForCast(destType); + exprNodeDesc = new ExprNodeGenericFuncDesc(destType, castUdf, + Arrays.asList(new ExprNodeDesc[] { exprNodeDesc })); + } + } + colList.add(exprNodeDesc); + String internalName = selRS.getColumnNames().get(this.columns.size() + i); + columnNames.add(internalName); + columnExprMap.put(internalName, exprNodeDesc); + signature.add(selRS.getSignature().get(this.columns.size() + i)); + } + operator.setConf(new SelectDesc(colList, columnNames)); + operator.setColumnExprMap(columnExprMap); + selRS.setSignature(signature); + operator.setSchema(selRS); + } + + public String getCompleteName() { + return tbl.getDbName() + "." 
+ tbl.getTableName(); + } + + public boolean isInsertInto() { + return isInsertInto; + } + + public static boolean canRunAutogatherStats(Operator curr) { + // check the ObjectInspector + for (ColumnInfo cinfo : curr.getSchema().getSignature()) { + if (cinfo.getIsVirtualCol()) { + return false; + } else if (cinfo.getObjectInspector().getCategory() != ObjectInspector.Category.PRIMITIVE) { + return false; + } else { + switch (((PrimitiveTypeInfo) cinfo.getType()).getPrimitiveCategory()) { + case BOOLEAN: + case BYTE: + case SHORT: + case INT: + case LONG: + case TIMESTAMP: + case FLOAT: + case DOUBLE: + case STRING: + case CHAR: + case VARCHAR: + case BINARY: + case DECIMAL: + // TODO: Support case DATE: + break; + default: + return false; + } + } + } + return true; + } + +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java index 3b6cbce..2bfbd29 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java @@ -110,11 +110,18 @@ private void handlePartialPartitionSpec(Map partSpec) throws partValsSpecified += partSpec.get(partKey) == null ? 0 : 1; } try { - if ((partValsSpecified == tbl.getPartitionKeys().size()) && (db.getPartition(tbl, partSpec, false, null, false) == null)) { - throw new SemanticException(ErrorMsg.COLUMNSTATSCOLLECTOR_INVALID_PARTITION.getMsg() + " : " + partSpec); + // when HIVESTATSCOLAUTOGATHER is set to true, a statically specified + // partition may not exist yet + if (!conf.getBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER)) { + if ((partValsSpecified == tbl.getPartitionKeys().size()) + && (db.getPartition(tbl, partSpec, false, null, false) == null)) { + throw new SemanticException(ErrorMsg.COLUMNSTATSCOLLECTOR_INVALID_PARTITION.getMsg() + + " : " + partSpec); + } } } catch (HiveException he) { - throw new SemanticException(ErrorMsg.COLUMNSTATSCOLLECTOR_INVALID_PARTITION.getMsg() + " : " + partSpec); + throw new SemanticException(ErrorMsg.COLUMNSTATSCOLLECTOR_INVALID_PARTITION.getMsg() + " : " + + partSpec); } // User might have only specified partial list of partition keys, in which case add other partition keys in partSpec @@ -157,7 +164,7 @@ private StringBuilder genPartitionClause(Map partSpec) throws Sem } else { groupByClause.append(","); } - groupByClause.append(fs.getName()); + groupByClause.append("`" + fs.getName() + "`"); } // attach the predicate and group by to the return clause @@ -235,12 +242,12 @@ private String genRewrittenQuery(List colNames, int numBitVectors, Map partSpec = null; + checkForPartitionColumns(colNames, + Utilities.getColumnNamesFromFieldSchema(tbl.getPartitionKeys())); + validateSpecifiedColumnNames(colNames); + if (conf.getBoolVar(ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS) && tbl.isPartitioned()) { + isPartitionStats = true; + } + + if (isPartitionStats) { + isTableLevel = false; + partSpec = AnalyzeCommandUtils.getPartKeyValuePairsFromAST(tbl, ast, conf); + handlePartialPartitionSpec(partSpec); + } else { + isTableLevel = true; + } + colType = getColumnTypes(colNames); + int numBitVectors = 0; + try { + numBitVectors = HiveStatsUtils.getNumBitVectorsForNDVEstimation(conf); + } catch (Exception e) { + throw new SemanticException(e.getMessage()); + } + rewrittenQuery = genRewrittenQuery(colNames, numBitVectors, partSpec, isPartitionStats); + rewrittenTree = genRewrittenTree(rewrittenQuery);
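+ // For illustration (hypothetical table t partitioned by ds and hr): "analyze table t partition (ds, hr) compute statistics for columns key" is rewritten into roughly "select compute_stats(key, 16), ds, hr from t group by `ds`, `hr`"; genPartitionClause now backtick-quotes the partition columns so that reserved-word partition names survive the rewrite. + + context.analyzeRewrite = new 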
AnalyzeRewriteContext(); + context.analyzeRewrite.setTableName(tbl.getDbName() + "." + tbl.getTableName()); + context.analyzeRewrite.setTblLvl(isTableLevel); + context.analyzeRewrite.setColName(colNames); + context.analyzeRewrite.setColType(colType); + return rewrittenTree; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java index 96ef20d..b2125ca 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java @@ -80,6 +80,7 @@ private HashMap nameToSplitSample; private List loadTableWork; private List loadFileWork; + private List columnStatsAutoGatherContexts; private Context ctx; private QueryState queryState; private HiveConf conf; @@ -166,6 +167,7 @@ public ParseContext( Set joinOps, Set smbMapJoinOps, List loadTableWork, List loadFileWork, + List columnStatsAutoGatherContexts, Context ctx, HashMap idToTableNameMap, int destTableId, UnionProcContext uCtx, List> listMapJoinOpsNoReducer, Map prunedPartitions, @@ -188,6 +190,7 @@ public ParseContext( this.smbMapJoinOps = smbMapJoinOps; this.loadFileWork = loadFileWork; this.loadTableWork = loadTableWork; + this.columnStatsAutoGatherContexts = columnStatsAutoGatherContexts; this.topOps = topOps; this.ctx = ctx; this.idToTableNameMap = idToTableNameMap; @@ -608,4 +611,13 @@ public void setNeedViewColumnAuthorization(boolean needViewColumnAuthorization) public Map getTabNameToTabObject() { return tabNameToTabObject; } + + public List getColumnStatsAutoGatherContexts() { + return columnStatsAutoGatherContexts; + } + + public void setColumnStatsAutoGatherContexts( + List columnStatsAutoGatherContexts) { + this.columnStatsAutoGatherContexts = columnStatsAutoGatherContexts; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java index 3a226e7..f2c631f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java @@ -63,7 +63,8 @@ private final Set destCubes; private final Set destGroupingSets; private final Map destToHaving; - private final HashSet insertIntoTables; + private final Map insertIntoTables; + private final Map insertOverwriteTables; private boolean isAnalyzeCommand; // used for the analyze command (statistics) private boolean isNoScanAnalyzeCommand; // used for the analyze command (statistics) (noscan) @@ -133,7 +134,8 @@ public QBParseInfo(String alias, boolean isSubQ) { destToSortby = new HashMap(); destToOrderby = new HashMap(); destToLimit = new HashMap>(); - insertIntoTables = new HashSet(); + insertIntoTables = new HashMap(); + insertOverwriteTables = new HashMap(); destRollups = new HashSet(); destCubes = new HashSet(); destGroupingSets = new HashSet(); @@ -174,13 +176,13 @@ public void addAggregationExprsForClause(String clause, } } - public void addInsertIntoTable(String fullName) { - insertIntoTables.add(fullName.toLowerCase()); + public void addInsertIntoTable(String fullName, ASTNode ast) { + insertIntoTables.put(fullName.toLowerCase(), ast); } public boolean isInsertIntoTable(String dbName, String table) { String fullName = dbName + "." 
+ table; - return insertIntoTables.contains(fullName.toLowerCase()); + return insertIntoTables.containsKey(fullName.toLowerCase()); } /** @@ -189,7 +191,7 @@ public boolean isInsertIntoTable(String dbName, String table) { * @return */ public boolean isInsertIntoTable(String fullTableName) { - return insertIntoTables.contains(fullTableName.toLowerCase()); + return insertIntoTables.containsKey(fullTableName.toLowerCase()); } public HashMap getAggregationExprsForClause(String clause) { @@ -636,6 +638,11 @@ public boolean isPartialScanAnalyzeCommand() { public void setPartialScanAnalyzeCommand(boolean isPartialScanAnalyzeCommand) { this.isPartialScanAnalyzeCommand = isPartialScanAnalyzeCommand; } + + public Map getInsertOverwriteTables() { + return insertOverwriteTables; + } + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 7162c08..bb92873 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -79,6 +79,7 @@ import org.apache.hadoop.hive.ql.exec.ArchiveUtils; import org.apache.hadoop.hive.ql.exec.ColumnInfo; import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluatorFactory; +import org.apache.hadoop.hive.ql.exec.FetchOperator; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.FileSinkOperator; import org.apache.hadoop.hive.ql.exec.FilterOperator; @@ -129,6 +130,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException.UnsupportedFeature; import org.apache.hadoop.hive.ql.optimizer.calcite.translator.HiveOpConverterPostProc; import org.apache.hadoop.hive.ql.optimizer.lineage.Generator; +import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner; import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec.SpecType; import org.apache.hadoop.hive.ql.parse.CalcitePlanner.ASTSearcher; @@ -182,6 +184,7 @@ import org.apache.hadoop.hive.ql.plan.MapJoinDesc; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.PTFDesc; +import org.apache.hadoop.hive.ql.plan.PartitionDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; import org.apache.hadoop.hive.ql.plan.ScriptDesc; @@ -259,6 +262,7 @@ protected LinkedHashMap, OpParseContext> opParseCtx; private List loadTableWork; private List loadFileWork; + private List columnStatsAutoGatherContexts; private final Map joinContext; private final Map smbMapJoinContext; private final HashMap topToTable; @@ -353,6 +357,7 @@ public SemanticAnalyzer(QueryState queryState) throws SemanticException { topOps = new LinkedHashMap(); loadTableWork = new ArrayList(); loadFileWork = new ArrayList(); + columnStatsAutoGatherContexts = new ArrayList(); opParseCtx = new LinkedHashMap, OpParseContext>(); joinContext = new HashMap(); smbMapJoinContext = new HashMap(); @@ -390,6 +395,7 @@ protected void reset(boolean clearPartsCache) { tabNameToTabObject.clear(); loadTableWork.clear(); loadFileWork.clear(); + columnStatsAutoGatherContexts.clear(); topOps.clear(); destTableId = 1; idToTableNameMap.clear(); @@ -448,7 +454,7 @@ public ParseContext getParseContext() { return new ParseContext(queryState, opToPartPruner, opToPartList, topOps, new HashSet(joinContext.keySet()), new HashSet(smbMapJoinContext.keySet()), - loadTableWork, loadFileWork, ctx, 
idToTableNameMap, destTableId, uCtx, + loadTableWork, loadFileWork, columnStatsAutoGatherContexts, ctx, idToTableNameMap, destTableId, uCtx, listMapJoinOpsNoReducer, prunedPartitions, tabNameToTabObject, opToSamplePruner, globalLimitCtx, nameToSplitSample, inputs, rootTasks, opToPartToSkewedPruner, viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting, @@ -1401,18 +1407,25 @@ public boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1, PlannerContext plan case HiveParser.TOK_INSERT_INTO: String currentDatabase = SessionState.get().getCurrentDatabase(); String tab_name = getUnescapedName((ASTNode) ast.getChild(0).getChild(0), currentDatabase); - qbp.addInsertIntoTable(tab_name); + qbp.addInsertIntoTable(tab_name, ast); case HiveParser.TOK_DESTINATION: ctx_1.dest = "insclause-" + ctx_1.nextNum; ctx_1.nextNum++; boolean isTmpFileDest = false; if (ast.getChildCount() > 0 && ast.getChild(0) instanceof ASTNode) { - ASTNode ch = (ASTNode)ast.getChild(0); - if (ch.getToken().getType() == HiveParser.TOK_DIR - && ch.getChildCount() > 0 && ch.getChild(0) instanceof ASTNode) { - ch = (ASTNode)ch.getChild(0); + ASTNode ch = (ASTNode) ast.getChild(0); + if (ch.getToken().getType() == HiveParser.TOK_DIR && ch.getChildCount() > 0 + && ch.getChild(0) instanceof ASTNode) { + ch = (ASTNode) ch.getChild(0); isTmpFileDest = ch.getToken().getType() == HiveParser.TOK_TMP_FILE; + } else { + if (ast.getToken().getType() == HiveParser.TOK_DESTINATION + && ast.getChild(0).getType() == HiveParser.TOK_TAB) { + String fullTableName = getUnescapedName((ASTNode) ast.getChild(0).getChild(0), + SessionState.get().getCurrentDatabase()); + qbp.getInsertOverwriteTables().put(fullTableName, ast); + } } } @@ -6516,6 +6529,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) DynamicPartitionCtx dpCtx = null; LoadTableDesc ltd = null; ListBucketingCtx lbCtx = null; + Map partSpec = null; switch (dest_type.intValue()) { case QBMetaData.DEST_TABLE: { @@ -6531,7 +6545,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) ErrorMsg.INSERT_EXTERNAL_TABLE.getMsg(dest_tab.getTableName())); } - Map partSpec = qbm.getPartSpecForAlias(dest); + partSpec = qbm.getPartSpecForAlias(dest); dest_path = dest_tab.getPath(); // If the query here is an INSERT_INTO and the target is an immutable table, @@ -6875,6 +6889,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) } input = genConversionSelectOperator(dest, qb, input, table_desc, dpCtx); + inputRR = opParseCtx.get(input).getRowResolver(); ArrayList vecCol = new ArrayList(); @@ -7004,9 +7019,66 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) FileSinkOperator fso = (FileSinkOperator) output; fso.getConf().setTable(dest_tab); fsopToTable.put(fso, dest_tab); + // The following code collects column stats when both hive.stats.autogather + // and hive.stats.column.autogather are set to true + // and the statement is an INSERT OVERWRITE or INSERT INTO a table + if (dest_tab != null && conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) + && conf.getBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER) + && ColumnStatsAutoGatherContext.canRunAutogatherStats(fso)) { + if (dest_type.intValue() == QBMetaData.DEST_TABLE) { + genAutoColumnStatsGatheringPipeline(qb, table_desc, partSpec, input, qb.getParseInfo() + .isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName())); + } else if (dest_type.intValue() == QBMetaData.DEST_PARTITION) { + genAutoColumnStatsGatheringPipeline(qb, table_desc, dest_part.getSpec(), input, qb + 
.getParseInfo().isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName())); + + } + } return output; } + private void genAutoColumnStatsGatheringPipeline(QB qb, TableDesc table_desc, + Map partSpec, Operator curr, boolean isInsertInto) throws SemanticException { + // If the table is partitioned, we need to check for schema evolution. + // If it is insert into/overwrite to the table with dynamic partition, we + // need to check all the partitions. + // Else if it is insert into a specific partition, we need to check only the + // specific partition. + // Taking care of only the columns that are actually inserted into is + // left as future work. + String tableName = table_desc.getTableName(); + Table table = null; + try { + table = db.getTable(tableName); + // We check only if the table is partitioned. + if (table.isPartitioned()) { + Set partitions = PartitionPruner.getAllPartitions(table); + List partDescs = new ArrayList<>(); + for (Partition partition : partitions) { + partDescs.add(Utilities.getPartitionDesc(partition)); + } + // We check all the partitions and all the columns; strictly speaking + // this is only needed in the dynamic partition case. + // TODO: check only the partitions that are potentially used. + // TODO: check only the columns that are inserted into. + if (FetchOperator.needConversion(table_desc, partDescs)) { + LOG.warn("Cannot auto-generate column stats for " + tableName + + ". Please run `analyze table " + tableName + " compute statistics for columns`"); + return; + } + } + } catch (HiveException e) { + throw new SemanticException(e.getMessage()); + } + LOG.info("Generating an operator pipeline to autogather column stats for table " + tableName + + " in query " + ctx.getCmd()); + ColumnStatsAutoGatherContext columnStatsAutoGatherContext = null; + columnStatsAutoGatherContext = new ColumnStatsAutoGatherContext(this, conf, curr, table, partSpec, isInsertInto); + columnStatsAutoGatherContext.insertAnalyzePipeline(); + columnStatsAutoGatherContexts.add(columnStatsAutoGatherContext); + } + String fixCtasColumnName(String colName) { return colName; } @@ -10689,7 +10761,7 @@ void analyzeInternal(ASTNode ast, PlannerContext plannerCtx) throws SemanticExce ParseContext pCtx = new ParseContext(queryState, opToPartPruner, opToPartList, topOps, new HashSet(joinContext.keySet()), new HashSet(smbMapJoinContext.keySet()), - loadTableWork, loadFileWork, ctx, idToTableNameMap, destTableId, uCtx, + loadTableWork, loadFileWork, columnStatsAutoGatherContexts, ctx, idToTableNameMap, destTableId, uCtx, listMapJoinOpsNoReducer, prunedPartitions, tabNameToTabObject, opToSamplePruner, globalLimitCtx, nameToSplitSample, inputs, rootTasks, opToPartToSkewedPruner, viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting, @@ -12895,4 +12967,12 @@ private void warn(String msg) { String.format("Warning: %s", msg)); } + public List getLoadFileWork() { + return loadFileWork; + } + + public void setLoadFileWork(List loadFileWork) { + this.loadFileWork = loadFileWork; + } + }
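The guard above skips auto-gathering whenever some partition would need schema or serde conversion; autoColumnStats_5.q below exercises this schema-evolution case (ALTER TABLE ADD COLUMNS). A condensed sketch of that check, using only the classes the patch itself calls; the helper name canGatherForAllPartitions is hypothetical:

  // True when column stats can be auto-gathered for every partition, i.e. no
  // partition requires schema/serde conversion relative to the table descriptor.
  static boolean canGatherForAllPartitions(Table table, TableDesc tableDesc)
      throws HiveException {
    if (!table.isPartitioned()) {
      return true; // a non-partitioned table has a single schema
    }
    List<PartitionDesc> partDescs = new ArrayList<>();
    for (Partition partition : PartitionPruner.getAllPartitions(table)) {
      partDescs.add(Utilities.getPartitionDesc(partition));
    }
    return !FetchOperator.needConversion(tableDesc, partDescs);
  }

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index 4049f40..2283115 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -23,12 +23,15 @@ import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashSet; +import java.util.LinkedList; import java.util.List; +import java.util.Queue; import java.util.Set; import 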
org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.HiveStatsUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -255,9 +258,46 @@ public void compile(final ParseContext pCtx, final List> leafTasks = new ArrayList<>(); + // We assume that ColumnStatsTask should be done after all the other tasks + // finish. + Queue> queue = new LinkedList<>(); + queue.addAll(rootTasks); + while (!queue.isEmpty()) { + Task tsk = queue.poll(); + if (tsk.getDependentTasks() == null || tsk.getDependentTasks().isEmpty()) { + if (!leafTasks.contains(tsk)) { + leafTasks.add(tsk); + } + } else { + for (Task t : tsk.getDependentTasks()) { + if (!queue.contains(t)) { + queue.offer(t); + } + } + } + } + if (isCStats) { + genColumnStatsTask(pCtx.getAnalyzeRewrite(), loadFileWork, leafTasks, outerQueryLimit, 0); + } else { + for (ColumnStatsAutoGatherContext columnStatsAutoGatherContext : pCtx + .getColumnStatsAutoGatherContexts()) { + if (!columnStatsAutoGatherContext.isInsertInto()) { + genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(), + columnStatsAutoGatherContext.getLoadFileWork(), leafTasks, outerQueryLimit, 0); + } else { + int numBitVector; + try { + numBitVector = HiveStatsUtils.getNumBitVectorsForNDVEstimation(conf); + } catch (Exception e) { + throw new SemanticException(e.getMessage()); + } + genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(), + columnStatsAutoGatherContext.getLoadFileWork(), leafTasks, outerQueryLimit, numBitVector); + } + } + } } // For each task, set the key descriptor for the reducer @@ -355,8 +395,9 @@ public void compile(final ParseContext pCtx, final List loadTableWork, - List loadFileWork, List> rootTasks, int outerQueryLimit) { + protected void genColumnStatsTask(AnalyzeRewriteContext analyzeRewrite, + List loadFileWork, List> leafTasks, + int outerQueryLimit, int numBitVector) { ColumnStatsTask cStatsTask = null; ColumnStatsWork cStatsWork = null; FetchWork fetch = null; @@ -385,12 +426,12 @@ protected void genColumnStatsTask(AnalyzeRewriteContext analyzeRewrite, List tsk : leafTasks) { + tsk.addDependentTask(cStatsTask); + } } @@ -453,7 +494,7 @@ public ParseContext getParseContext(ParseContext pCtx, List10; + +desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12'); + + +drop table nzhang_part14; +create table if not exists nzhang_part14 (key string, value string) +partitioned by (ds string, hr string); + +INSERT OVERWRITE TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr) +SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10; + +desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12'); + +drop table a; +create table a (key string, value string) +partitioned by (ds string, hr string); + +drop table b; +create table b (key string, value string) +partitioned by (ds string, hr string); + +drop table c; +create table c (key string, value string) +partitioned by (ds string, hr string); + + +FROM srcpart +INSERT OVERWRITE TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10 +INSERT OVERWRITE TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11 +INSERT OVERWRITE TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0; + +explain select key from a; +explain select value from b; +explain select key from b; +explain select value from c; +explain 
select key from c; + diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_2.q b/ql/src/test/queries/clientpositive/autoColumnStats_2.q new file mode 100644 index 0000000..c1abcb1 --- /dev/null +++ b/ql/src/test/queries/clientpositive/autoColumnStats_2.q @@ -0,0 +1,214 @@ +set hive.stats.column.autogather=true; +set hive.stats.fetch.column.stats=true; +set hive.exec.dynamic.partition=true; +set hive.exec.dynamic.partition.mode=nonstrict; +set hive.auto.convert.join=true; +set hive.join.emit.interval=2; +set hive.auto.convert.join.noconditionaltask=true; +set hive.auto.convert.join.noconditionaltask.size=10000; +set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ; +set hive.optimize.bucketingsorting=false; + +drop table src_multi1; + +create table src_multi1 like src; + +insert into table src_multi1 select * from src; + +explain extended select * from src_multi1; + +describe formatted src_multi1; + +drop table a; +drop table b; +create table a like src; +create table b like src; + +from src +insert into table a select * +insert into table b select *; + +describe formatted a key; +describe formatted b key; + +from src +insert overwrite table a select * +insert into table b select *; + +describe formatted a; +describe formatted b; + +describe formatted b key; +describe formatted b value; + +insert into table b select NULL, NULL from src limit 10; + +describe formatted b key; +describe formatted b value; + +insert into table b(value) select key+100000 from src limit 10; + +describe formatted b key; +describe formatted b value; + +drop table src_multi2; + +create table src_multi2 like src; + +insert into table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key; + +describe formatted src_multi2; + + +drop table nzhang_part14; + +create table if not exists nzhang_part14 (key string) + partitioned by (value string); + +insert into table nzhang_part14 partition(value) +select key, value from ( + select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a + union all + select * from (select 'k2' as key, '' as value from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value from src limit 2)c +) T; + +explain select key from nzhang_part14; + + +drop table src5; + +create table src5 as select key, value from src limit 5; + +insert into table nzhang_part14 partition(value) +select key, value from src5; + +explain select key from nzhang_part14; + +drop table alter5; + +create table alter5 ( col1 string ) partitioned by (dt string); + +alter table alter5 add partition (dt='a'); + +describe formatted alter5 partition (dt='a'); + +insert into table alter5 partition (dt='a') select key from src ; + +describe formatted alter5 partition (dt='a'); + +explain select * from alter5 where dt='a'; + +drop table alter5; + +create table alter5 ( col1 string ) partitioned by (dt string); + +alter table alter5 add partition (dt='a') location 'parta'; + +describe formatted alter5 partition (dt='a'); + +insert into table alter5 partition (dt='a') select key from src ; + +describe formatted alter5 partition (dt='a'); + +explain select * from alter5 where dt='a'; + + +drop table src_stat_part; +create table src_stat_part(key string, value string) partitioned by (partitionId int); + +insert into table src_stat_part partition (partitionId=1) +select * from src1 limit 5; + +describe formatted src_stat_part 
PARTITION(partitionId=1); + +insert into table src_stat_part partition (partitionId=2) +select * from src1; + +describe formatted src_stat_part PARTITION(partitionId=2); + +drop table srcbucket_mapjoin; +CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +drop table tab_part; +CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +drop table srcbucket_mapjoin_part; +CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; + +load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); +load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); + +load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); +load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); +load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); +load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); + +insert into table tab_part partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part; + +describe formatted tab_part partition (ds='2008-04-08'); + +CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +insert into table tab partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin; + +describe formatted tab partition (ds='2008-04-08'); + +drop table nzhang_part14; + +create table if not exists nzhang_part14 (key string, value string) + partitioned by (ds string, hr string); + +describe formatted nzhang_part14; + +insert into table nzhang_part14 partition(ds, hr) +select key, value, ds, hr from ( + select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a + union all + select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c +) T; + +desc formatted nzhang_part14 partition(ds='1', hr='3'); + + +INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr) +SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10; + +desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12'); + + +drop table nzhang_part14; +create table if not exists nzhang_part14 (key string, value string) +partitioned by (ds string, hr string); + +INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr) +SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10; + +desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12'); + +drop table a; +create table a (key string, value string) +partitioned by (ds string, hr string); + +drop table b; +create table b (key string, value string) +partitioned by (ds string, hr string); + +drop table c; +create table c (key string, value string) +partitioned by (ds string, hr string); + + +FROM srcpart +INSERT into TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10 +INSERT into TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds 
is not null and hr>11 +INSERT into TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0; + +explain select key from a; +explain select value from b; +explain select key from b; +explain select value from c; +explain select key from c; + diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_3.q b/ql/src/test/queries/clientpositive/autoColumnStats_3.q new file mode 100644 index 0000000..2ddd981 --- /dev/null +++ b/ql/src/test/queries/clientpositive/autoColumnStats_3.q @@ -0,0 +1,67 @@ +set hive.stats.column.autogather=false; +set hive.stats.fetch.column.stats=true; +set hive.exec.dynamic.partition=true; +set hive.exec.dynamic.partition.mode=nonstrict; +set hive.auto.convert.join=true; +set hive.join.emit.interval=2; +set hive.auto.convert.join.noconditionaltask=true; +set hive.auto.convert.join.noconditionaltask.size=10000; +set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ; +set hive.optimize.bucketingsorting=false; + +drop table src_multi1; + +create table src_multi1 like src; + +analyze table src_multi1 compute statistics for columns key; + +describe formatted src_multi1; + +set hive.stats.column.autogather=true; + +insert into table src_multi1 select * from src; + +describe formatted src_multi1; + + +set hive.stats.column.autogather=false; + +drop table nzhang_part14; + +create table if not exists nzhang_part14 (key string, value string) + partitioned by (ds string, hr string); + +describe formatted nzhang_part14; + +insert into table nzhang_part14 partition(ds, hr) +select key, value, ds, hr from ( + select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a + union all + select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c +) T; + +desc formatted nzhang_part14 partition(ds='1', hr='3'); + +analyze table nzhang_part14 partition(ds='1', hr='3') compute statistics for columns value; + +desc formatted nzhang_part14 partition(ds='1', hr='3'); + +desc formatted nzhang_part14 partition(ds='2', hr='1'); + +set hive.stats.column.autogather=true; + +insert into table nzhang_part14 partition(ds, hr) +select key, value, ds, hr from ( + select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a + union all + select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c +) T; + +desc formatted nzhang_part14 partition(ds='1', hr='3'); + +desc formatted nzhang_part14 partition(ds='2', hr='1'); + diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_4.q b/ql/src/test/queries/clientpositive/autoColumnStats_4.q new file mode 100644 index 0000000..9780a75 --- /dev/null +++ b/ql/src/test/queries/clientpositive/autoColumnStats_4.q @@ -0,0 +1,20 @@ +set hive.stats.column.autogather=true; +set hive.mapred.mode=nonstrict; +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; + +create table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); + +desc formatted acid_dtt; + +explain insert into table acid_dtt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 
10; + +insert into table acid_dtt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10; + +desc formatted acid_dtt; + +delete from acid_dtt where b = '0ruyd6Y50JpdGRf6HqD' or b = '2uLyD28144vklju213J1mr'; + +desc formatted acid_dtt; + + diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_5.q b/ql/src/test/queries/clientpositive/autoColumnStats_5.q new file mode 100644 index 0000000..9f41598 --- /dev/null +++ b/ql/src/test/queries/clientpositive/autoColumnStats_5.q @@ -0,0 +1,32 @@ +set hive.stats.column.autogather=true; +set hive.mapred.mode=nonstrict; +set hive.cli.print.header=true; +SET hive.exec.schema.evolution=true; +SET hive.vectorized.execution.enabled=false; +set hive.fetch.task.conversion=none; +set hive.exec.dynamic.partition.mode=nonstrict; + +-- SORT_QUERY_RESULTS +-- +-- FILE VARIATION: TEXT, Non-Vectorized, MapWork, Partitioned +-- +-- +-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT +--- +CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE; + +explain insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original'); + +insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original'); + +-- Table-Non-Cascade ADD COLUMNS ... +alter table partitioned1 add columns(c int, d string); + +explain insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty'); + +insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty'); + +explain insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred'); + +insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred'); + diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_6.q b/ql/src/test/queries/clientpositive/autoColumnStats_6.q new file mode 100644 index 0000000..45e5daa --- /dev/null +++ b/ql/src/test/queries/clientpositive/autoColumnStats_6.q @@ -0,0 +1,41 @@ +set hive.stats.column.autogather=true; +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; +set hive.merge.orcfile.stripe.level=true; +set hive.exec.dynamic.partition=true; +set hive.exec.dynamic.partition.mode=nonstrict; +set hive.merge.sparkfiles=true; + +DROP TABLE orcfile_merge2a; + +CREATE TABLE orcfile_merge2a (key INT, value STRING) + PARTITIONED BY (one string, two string, three string) + STORED AS ORC; + +EXPLAIN INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three) + SELECT key, value, PMOD(HASH(key), 10) as two, + PMOD(HASH(value), 10) as three + FROM src; + +INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three) + SELECT key, value, PMOD(HASH(key), 10) as two, + PMOD(HASH(value), 10) as three + FROM src; + +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge2a/one=1/two=0/three=2/; + +SELECT SUM(HASH(c)) FROM ( + SELECT TRANSFORM(*) USING 'tr \t _' AS (c) + FROM orcfile_merge2a +) t; + +set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; + +SELECT SUM(HASH(c)) FROM ( + SELECT TRANSFORM(key, value, '1', PMOD(HASH(key), 10), + PMOD(HASH(value), 10)) USING 'tr \t _' AS (c) + FROM src +) t; + +DROP TABLE orcfile_merge2a; + diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_7.q 
b/ql/src/test/queries/clientpositive/autoColumnStats_7.q new file mode 100644 index 0000000..2227685 --- /dev/null +++ b/ql/src/test/queries/clientpositive/autoColumnStats_7.q @@ -0,0 +1,19 @@ +set hive.stats.column.autogather=true; +set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; +set hive.map.aggr=false; +set hive.groupby.skewindata=true; + +-- Taken from groupby2.q +CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE; +CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src; + +explain FROM src_temp +INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1); + +FROM src_temp +INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1); + +SELECT dest_g2.* FROM dest_g2; + +DROP TABLE dest_g2; +DROP TABLE src_temp; diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_8.q b/ql/src/test/queries/clientpositive/autoColumnStats_8.q new file mode 100644 index 0000000..42d070a --- /dev/null +++ b/ql/src/test/queries/clientpositive/autoColumnStats_8.q @@ -0,0 +1,27 @@ +set hive.stats.column.autogather=true; +-- SORT_QUERY_RESULTS + +show partitions srcpart; + + + +create table if not exists nzhang_part8 like srcpart; +describe extended nzhang_part8; + +set hive.merge.mapfiles=false; +set hive.exec.dynamic.partition=true; +set hive.exec.dynamic.partition.mode=nonstrict; + +explain extended +from srcpart +insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' +insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'; + +from srcpart +insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' +insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'; + +show partitions nzhang_part8; + +select * from nzhang_part8 where ds is not null and hr is not null; + diff --git a/ql/src/test/results/clientpositive/autoColumnStats_1.q.out b/ql/src/test/results/clientpositive/autoColumnStats_1.q.out new file mode 100644 index 0000000..e290e52 --- /dev/null +++ b/ql/src/test/results/clientpositive/autoColumnStats_1.q.out @@ -0,0 +1,1379 @@ +PREHOOK: query: drop table src_multi1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table src_multi1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table src_multi1 like src +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@src_multi1 +POSTHOOK: query: create table src_multi1 like src +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src_multi1 +PREHOOK: query: insert overwrite table src_multi1 select * from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@src_multi1 +POSTHOOK: query: insert overwrite table src_multi1 select * from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@src_multi1 +POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain extended select * from src_multi1 +PREHOOK: type: QUERY 
+POSTHOOK: query: explain extended select * from src_multi1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: src_multi1 + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + GatherStats: false + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: describe formatted src_multi1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@src_multi1 +POSTHOOK: query: describe formatted src_multi1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@src_multi1 +# col_name data_type comment + +key string default +value string default + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 500 + rawDataSize 5312 + totalSize 5812 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: drop table a +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table a +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table b +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table b +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table a like src +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@a +POSTHOOK: query: create table a like src +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@a +PREHOOK: query: create table b like src +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@b +POSTHOOK: query: create table b like src +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@b +PREHOOK: query: from src +insert overwrite table a select * +insert overwrite table b select * +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@a +PREHOOK: Output: default@b +POSTHOOK: query: from src +insert overwrite table a select * +insert overwrite table b select * +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@a +POSTHOOK: Output: default@b +POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted a +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@a +POSTHOOK: query: describe formatted a +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@a +# col_name data_type comment + +key string default +value string default + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was 
here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 500 + rawDataSize 5312 + totalSize 5812 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: describe formatted b +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@b +POSTHOOK: query: describe formatted b +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@b +# col_name data_type comment + +key string default +value string default + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 500 + rawDataSize 5312 + totalSize 5812 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: drop table a +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@a +PREHOOK: Output: default@a +POSTHOOK: query: drop table a +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@a +POSTHOOK: Output: default@a +PREHOOK: query: drop table b +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@b +PREHOOK: Output: default@b +POSTHOOK: query: drop table b +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@b +POSTHOOK: Output: default@b +PREHOOK: query: create table a like src +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@a +POSTHOOK: query: create table a like src +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@a +PREHOOK: query: create table b like src +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@b +POSTHOOK: query: create table b like src +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@b +PREHOOK: query: from src +insert overwrite table a select * +insert into table b select * +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@a +PREHOOK: Output: default@b +POSTHOOK: query: from src +insert overwrite table a select * +insert into table b select * +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@a +POSTHOOK: Output: default@b +POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted a +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@a +POSTHOOK: query: describe formatted a +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: 
default@a +# col_name data_type comment + +key string default +value string default + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 500 + rawDataSize 5312 + totalSize 5812 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: describe formatted b +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@b +POSTHOOK: query: describe formatted b +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@b +# col_name data_type comment + +key string default +value string default + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 500 + rawDataSize 5312 + totalSize 5812 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: drop table src_multi2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table src_multi2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table src_multi2 like src +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@src_multi2 +POSTHOOK: query: create table src_multi2 like src +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src_multi2 +PREHOOK: query: insert overwrite table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@src1 +PREHOOK: Output: default@src_multi2 +POSTHOOK: query: insert overwrite table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@src1 +POSTHOOK: Output: default@src_multi2 +POSTHOOK: Lineage: src_multi2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted src_multi2 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@src_multi2 +POSTHOOK: query: describe formatted src_multi2 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@src_multi2 +# col_name data_type comment + +key string default +value string default + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE 
+Table Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 508 + rawDataSize 5400 + totalSize 5908 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: drop table nzhang_part14 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table nzhang_part14 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table if not exists nzhang_part14 (key string) + partitioned by (value string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: create table if not exists nzhang_part14 (key string) + partitioned by (value string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@nzhang_part14 +PREHOOK: query: insert overwrite table nzhang_part14 partition(value) +select key, value from ( + select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a + union all + select * from (select 'k2' as key, '' as value from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value from src limit 2)c +) T +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: insert overwrite table nzhang_part14 partition(value) +select key, value from ( + select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a + union all + select * from (select 'k2' as key, '' as value from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value from src limit 2)c +) T +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@nzhang_part14@value= +POSTHOOK: Output: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: Lineage: nzhang_part14 PARTITION(value= ).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(value=__HIVE_DEFAULT_PARTITION__).key EXPRESSION [] +PREHOOK: query: explain select key from nzhang_part14 +PREHOOK: type: QUERY +POSTHOOK: query: explain select key from nzhang_part14 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: nzhang_part14 + Statistics: Num rows: 6 Data size: 516 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 6 Data size: 516 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: drop table src5 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table src5 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table src5 as select key, value from src limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src5 +POSTHOOK: query: create table src5 as select key, value from src limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src5 +POSTHOOK: Lineage: src5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, 
comment:default), ] +PREHOOK: query: insert overwrite table nzhang_part14 partition(value) +select key, value from src5 +PREHOOK: type: QUERY +PREHOOK: Input: default@src5 +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: insert overwrite table nzhang_part14 partition(value) +select key, value from src5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src5 +POSTHOOK: Output: default@nzhang_part14@value=val_165 +POSTHOOK: Output: default@nzhang_part14@value=val_238 +POSTHOOK: Output: default@nzhang_part14@value=val_27 +POSTHOOK: Output: default@nzhang_part14@value=val_311 +POSTHOOK: Output: default@nzhang_part14@value=val_86 +POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_165).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_238).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_27).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_311).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_86).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: explain select key from nzhang_part14 +PREHOOK: type: QUERY +POSTHOOK: query: explain select key from nzhang_part14 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: nzhang_part14 + Statistics: Num rows: 11 Data size: 946 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 11 Data size: 946 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@alter5 +POSTHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@alter5 +PREHOOK: query: alter table alter5 add partition (dt='a') location 'parta' +PREHOOK: type: ALTERTABLE_ADDPARTS +#### A masked pattern was here #### +PREHOOK: Output: default@alter5 +POSTHOOK: query: alter table alter5 add partition (dt='a') location 'parta' +POSTHOOK: type: ALTERTABLE_ADDPARTS +#### A masked pattern was here #### +POSTHOOK: Output: default@alter5 +POSTHOOK: Output: default@alter5@dt=a +PREHOOK: query: describe formatted alter5 partition (dt='a') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter5 +POSTHOOK: query: describe formatted alter5 partition (dt='a') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter5 +# col_name data_type comment + +col1 string + +# Partition Information +# col_name data_type comment + +dt string + +# Detailed Partition Information +Partition Value: [a] +Database: default +Table: alter5 +#### A masked pattern was here #### +Partition Parameters: +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: 
query: insert overwrite table alter5 partition (dt='a') select key from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@alter5@dt=a +POSTHOOK: query: insert overwrite table alter5 partition (dt='a') select key from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@alter5@dt=a +POSTHOOK: Lineage: alter5 PARTITION(dt=a).col1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: describe formatted alter5 partition (dt='a') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter5 +POSTHOOK: query: describe formatted alter5 partition (dt='a') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter5 +# col_name data_type comment + +col1 string + +# Partition Information +# col_name data_type comment + +dt string + +# Detailed Partition Information +Partition Value: [a] +Database: default +Table: alter5 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"col1\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 500 + rawDataSize 1406 + totalSize 1906 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain select * from alter5 where dt='a' +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from alter5 where dt='a' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: alter5 + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: col1 (type: string), 'a' (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 86000 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: drop table src_stat_part +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table src_stat_part +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table src_stat_part(key string, value string) partitioned by (partitionId int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@src_stat_part +POSTHOOK: query: create table src_stat_part(key string, value string) partitioned by (partitionId int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src_stat_part +PREHOOK: query: insert overwrite table src_stat_part partition (partitionId=1) +select * from src1 limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +PREHOOK: Output: default@src_stat_part@partitionid=1 +POSTHOOK: query: insert overwrite table src_stat_part partition (partitionId=1) +select * from src1 limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +POSTHOOK: Output: default@src_stat_part@partitionid=1 +POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=1).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=1).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted src_stat_part PARTITION(partitionId=1) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@src_stat_part +POSTHOOK: query: describe 
formatted src_stat_part PARTITION(partitionId=1) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@src_stat_part +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +partitionid int + +# Detailed Partition Information +Partition Value: [1] +Database: default +Table: src_stat_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 5 + rawDataSize 38 + totalSize 43 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: insert overwrite table src_stat_part partition (partitionId=2) +select * from src1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +PREHOOK: Output: default@src_stat_part@partitionid=2 +POSTHOOK: query: insert overwrite table src_stat_part partition (partitionId=2) +select * from src1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +POSTHOOK: Output: default@src_stat_part@partitionid=2 +POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted src_stat_part PARTITION(partitionId=2) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@src_stat_part +POSTHOOK: query: describe formatted src_stat_part PARTITION(partitionId=2) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@src_stat_part +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +partitionid int + +# Detailed Partition Information +Partition Value: [2] +Database: default +Table: src_stat_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 25 + rawDataSize 191 + totalSize 216 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: drop table srcbucket_mapjoin +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table srcbucket_mapjoin +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcbucket_mapjoin +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcbucket_mapjoin +PREHOOK: query: drop table tab_part +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table tab_part +POSTHOOK: type: DROPTABLE 
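[Reviewer note, not part of the golden files] The outputs above encode the core contract of this patch: after a plain INSERT, both the basic stats (numRows, rawDataSize, totalSize) and the COLUMN_STATS_ACCURATE marker land in the table/partition parameters with no explicit ANALYZE step. A minimal interactive sketch of the same check, assuming the standard src test table is loaded and the new flag is on; stats_check is a hypothetical table name, not one used by these tests:

set hive.stats.column.autogather=true;
create table stats_check like src;
insert overwrite table stats_check select * from src;
-- basic stats appear under Table Parameters, including COLUMN_STATS_ACCURATE
describe formatted stats_check;
-- per-column stats were gathered by the same query, no ANALYZE needed
describe formatted stats_check key;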
+PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tab_part +POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tab_part +PREHOOK: query: drop table srcbucket_mapjoin_part +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table srcbucket_mapjoin_part +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcbucket_mapjoin_part +PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin +POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin +POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: 
default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket_mapjoin_part +PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: Output: default@tab_part@ds=2008-04-08 +POSTHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin_part +POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: Output: default@tab_part@ds=2008-04-08 +POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: describe formatted tab_part partition (ds='2008-04-08') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@tab_part +POSTHOOK: query: describe formatted tab_part partition (ds='2008-04-08') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@tab_part +# col_name data_type comment + +key int +value string + +# Partition Information +# col_name data_type comment + +ds string + +# Detailed Partition Information +Partition Value: [2008-04-08] +Database: default +Table: tab_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 4 + numRows 500 + rawDataSize 5312 + totalSize 5812 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: 4 +Bucket Columns: [key] +Sort Columns: [Order(col:key, order:1)] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tab +POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tab +PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin +PREHOOK: type: QUERY +PREHOOK: Input: 
default@srcbucket_mapjoin +PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: Output: default@tab@ds=2008-04-08 +POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin +POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 +POSTHOOK: Output: default@tab@ds=2008-04-08 +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: describe formatted tab partition (ds='2008-04-08') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@tab +POSTHOOK: query: describe formatted tab partition (ds='2008-04-08') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@tab +# col_name data_type comment + +key int +value string + +# Partition Information +# col_name data_type comment + +ds string + +# Detailed Partition Information +Partition Value: [2008-04-08] +Database: default +Table: tab +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 2 + numRows 242 + rawDataSize 2566 + totalSize 2808 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: 2 +Bucket Columns: [key] +Sort Columns: [Order(col:key, order:1)] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: drop table nzhang_part14 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@nzhang_part14 +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: drop table nzhang_part14 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@nzhang_part14 +POSTHOOK: Output: default@nzhang_part14 +PREHOOK: query: create table if not exists nzhang_part14 (key string, value string) + partitioned by (ds string, hr string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: create table if not exists nzhang_part14 (key string, value string) + partitioned by (ds string, hr string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@nzhang_part14 +PREHOOK: query: describe formatted nzhang_part14 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part14 +POSTHOOK: query: describe formatted nzhang_part14 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@nzhang_part14 +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +ds string +hr string + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: insert overwrite table 
nzhang_part14 partition(ds, hr) +select key, value, ds, hr from ( + select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a + union all + select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c +) T +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: insert overwrite table nzhang_part14 partition(ds, hr) +select key, value, ds, hr from ( + select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a + union all + select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c +) T +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@nzhang_part14@ds=1/hr=2 +POSTHOOK: Output: default@nzhang_part14@ds=1/hr=3 +POSTHOOK: Output: default@nzhang_part14@ds=2/hr=1 +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).value EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).value EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).value EXPRESSION [] +PREHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part14 +POSTHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@nzhang_part14 +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +ds string +hr string + +# Detailed Partition Information +Partition Value: [1, 3] +Database: default +Table: nzhang_part14 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 2 + rawDataSize 6 + totalSize 8 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: INSERT OVERWRITE TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr) +SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@nzhang_part14@ds=2010-03-03 +POSTHOOK: query: INSERT OVERWRITE TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr) +SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: 
default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=11 +POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=12 +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part14 +POSTHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@nzhang_part14 +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +ds string +hr string + +# Detailed Partition Information +Partition Value: [2010-03-03, 12] +Database: default +Table: nzhang_part14 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 1000 + rawDataSize 10624 + totalSize 11624 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: drop table nzhang_part14 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@nzhang_part14 +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: drop table nzhang_part14 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@nzhang_part14 +POSTHOOK: Output: default@nzhang_part14 +PREHOOK: query: create table if not exists nzhang_part14 (key string, value string) +partitioned by (ds string, hr string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: create table if not exists nzhang_part14 (key string, value string) +partitioned by (ds string, hr string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@nzhang_part14 +PREHOOK: query: INSERT OVERWRITE TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr) +SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@nzhang_part14@ds=2010-03-03 +POSTHOOK: query: INSERT OVERWRITE TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr) +SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: 
default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=11 +POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=12 +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part14 +POSTHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@nzhang_part14 +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +ds string +hr string + +# Detailed Partition Information +Partition Value: [2010-03-03, 12] +Database: default +Table: nzhang_part14 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 1000 + rawDataSize 10624 + totalSize 11624 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: drop table a +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@a +PREHOOK: Output: default@a +POSTHOOK: query: drop table a +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@a +POSTHOOK: Output: default@a +PREHOOK: query: create table a (key string, value string) +partitioned by (ds string, hr string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@a +POSTHOOK: query: create table a (key string, value string) +partitioned by (ds string, hr string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@a +PREHOOK: query: drop table b +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@b +PREHOOK: Output: default@b +POSTHOOK: query: drop table b +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@b +POSTHOOK: Output: default@b +PREHOOK: query: create table b (key string, value string) +partitioned by (ds string, hr string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@b +POSTHOOK: query: create table b (key string, value string) +partitioned by (ds string, hr string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@b +PREHOOK: query: drop table c +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table c +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table c (key string, value string) +partitioned by (ds string, hr string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@c +POSTHOOK: query: create table c (key string, value string) +partitioned by (ds string, hr string) +POSTHOOK: type: 
CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@c +PREHOOK: query: FROM srcpart +INSERT OVERWRITE TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10 +INSERT OVERWRITE TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11 +INSERT OVERWRITE TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@a@ds=2010-03-11 +PREHOOK: Output: default@b@ds=2010-04-11 +PREHOOK: Output: default@c@ds=2010-05-11 +POSTHOOK: query: FROM srcpart +INSERT OVERWRITE TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10 +INSERT OVERWRITE TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11 +INSERT OVERWRITE TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@a@ds=2010-03-11/hr=11 +POSTHOOK: Output: default@a@ds=2010-03-11/hr=12 +POSTHOOK: Output: default@b@ds=2010-04-11/hr=12 +POSTHOOK: Output: default@c@ds=2010-05-11/hr=11 +POSTHOOK: Output: default@c@ds=2010-05-11/hr=12 +POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: b PARTITION(ds=2010-04-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: b PARTITION(ds=2010-04-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain select key from a +PREHOOK: type: QUERY +POSTHOOK: query: explain select key from a +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: a + Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2000 Data 
size: 174000 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: explain select value from b +PREHOOK: type: QUERY +POSTHOOK: query: explain select value from b +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: b + Statistics: Num rows: 1000 Data size: 91000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: value (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1000 Data size: 91000 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: explain select key from b +PREHOOK: type: QUERY +POSTHOOK: query: explain select key from b +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: b + Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: explain select value from c +PREHOOK: type: QUERY +POSTHOOK: query: explain select value from c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: c + Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: value (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: explain select key from c +PREHOOK: type: QUERY +POSTHOOK: query: explain select key from c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: c + Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + diff --git a/ql/src/test/results/clientpositive/autoColumnStats_2.q.out b/ql/src/test/results/clientpositive/autoColumnStats_2.q.out new file mode 100644 index 0000000..a76bf5f --- /dev/null +++ b/ql/src/test/results/clientpositive/autoColumnStats_2.q.out @@ -0,0 +1,1500 @@ +PREHOOK: query: drop table src_multi1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table src_multi1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table src_multi1 like src +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@src_multi1 +POSTHOOK: query: create table src_multi1 like src +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src_multi1 +PREHOOK: query: insert into table src_multi1 select * from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@src_multi1 +POSTHOOK: query: insert into table src_multi1 select * from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@src_multi1 +POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] 
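[Reviewer note, not part of the golden files] autoColumnStats_2.q.out differs from autoColumnStats_1.q.out in one key respect: it exercises INSERT INTO rather than INSERT OVERWRITE, so each append must merge the freshly gathered column stats into the existing ones instead of replacing them. The describe outputs further down encode that merge; for example num_nulls for b.key climbs from 0 to 10 to 20 as all-NULL rows are appended. A sketch of the accumulating case, under the same session assumptions as above (b is the test's own table, already holding 500 non-NULL keys):

-- appending 10 all-NULL rows must raise num_nulls to 10 in the
-- merged stats for b.key, not reset the previously gathered values
insert into table b select NULL, NULL from src limit 10;
describe formatted b key;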
+PREHOOK: query: explain extended select * from src_multi1 +PREHOOK: type: QUERY +POSTHOOK: query: explain extended select * from src_multi1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: src_multi1 + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + GatherStats: false + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: describe formatted src_multi1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@src_multi1 +POSTHOOK: query: describe formatted src_multi1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@src_multi1 +# col_name data_type comment + +key string default +value string default + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 500 + rawDataSize 5312 + totalSize 5812 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: drop table a +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table a +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table b +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table b +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table a like src +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@a +POSTHOOK: query: create table a like src +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@a +PREHOOK: query: create table b like src +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@b +POSTHOOK: query: create table b like src +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@b +PREHOOK: query: from src +insert into table a select * +insert into table b select * +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@a +PREHOOK: Output: default@b +POSTHOOK: query: from src +insert into table a select * +insert into table b select * +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@a +POSTHOOK: Output: default@b +POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted a key +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@a +POSTHOOK: query: describe formatted a key +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@a +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +key string 0 205 2.812 3 
from deserializer +PREHOOK: query: describe formatted b key +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@b +POSTHOOK: query: describe formatted b key +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@b +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +key string 0 205 2.812 3 from deserializer +PREHOOK: query: from src +insert overwrite table a select * +insert into table b select * +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@a +PREHOOK: Output: default@b +POSTHOOK: query: from src +insert overwrite table a select * +insert into table b select * +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@a +POSTHOOK: Output: default@b +POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted a +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@a +POSTHOOK: query: describe formatted a +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@a +# col_name data_type comment + +key string default +value string default + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 500 + rawDataSize 5312 + totalSize 5812 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: describe formatted b +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@b +POSTHOOK: query: describe formatted b +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@b +# col_name data_type comment + +key string default +value string default + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 2 + numRows 1000 + rawDataSize 10624 + totalSize 11624 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: describe formatted b key +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@b +POSTHOOK: query: describe formatted b key +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@b +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +key string 0 205 2.812 3 from deserializer +PREHOOK: query: describe formatted b value +PREHOOK: type: 
DESCTABLE +PREHOOK: Input: default@b +POSTHOOK: query: describe formatted b value +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@b +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +value string 0 214 6.812 7 from deserializer +PREHOOK: query: insert into table b select NULL, NULL from src limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@b +POSTHOOK: query: insert into table b select NULL, NULL from src limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@b +POSTHOOK: Lineage: b.key SIMPLE [] +POSTHOOK: Lineage: b.value SIMPLE [] +PREHOOK: query: describe formatted b key +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@b +POSTHOOK: query: describe formatted b key +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@b +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +key string 10 205 2.812 3 from deserializer +PREHOOK: query: describe formatted b value +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@b +POSTHOOK: query: describe formatted b value +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@b +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +value string 10 214 6.812 7 from deserializer +PREHOOK: query: insert into table b(value) select key+100000 from src limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@b +POSTHOOK: query: insert into table b(value) select key+100000 from src limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@b +POSTHOOK: Lineage: b.key SIMPLE [] +POSTHOOK: Lineage: b.value EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: describe formatted b key +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@b +POSTHOOK: query: describe formatted b key +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@b +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +key string 20 205 2.812 3 from deserializer +PREHOOK: query: describe formatted b value +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@b +POSTHOOK: query: describe formatted b value +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@b +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +value string 10 214 8.0 8 from deserializer +PREHOOK: query: drop table src_multi2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table src_multi2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table src_multi2 like src +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@src_multi2 +POSTHOOK: query: create table src_multi2 like src +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src_multi2 +PREHOOK: query: insert into table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@src1 +PREHOOK: Output: default@src_multi2 +POSTHOOK: query: insert into table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@src1 +POSTHOOK: Output: default@src_multi2 +POSTHOOK: Lineage: 
src_multi2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted src_multi2 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@src_multi2 +POSTHOOK: query: describe formatted src_multi2 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@src_multi2 +# col_name data_type comment + +key string default +value string default + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 508 + rawDataSize 5400 + totalSize 5908 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: drop table nzhang_part14 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table nzhang_part14 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table if not exists nzhang_part14 (key string) + partitioned by (value string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: create table if not exists nzhang_part14 (key string) + partitioned by (value string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@nzhang_part14 +PREHOOK: query: insert into table nzhang_part14 partition(value) +select key, value from ( + select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a + union all + select * from (select 'k2' as key, '' as value from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value from src limit 2)c +) T +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: insert into table nzhang_part14 partition(value) +select key, value from ( + select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a + union all + select * from (select 'k2' as key, '' as value from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value from src limit 2)c +) T +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@nzhang_part14@value= +POSTHOOK: Output: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: Lineage: nzhang_part14 PARTITION(value= ).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(value=__HIVE_DEFAULT_PARTITION__).key EXPRESSION [] +PREHOOK: query: explain select key from nzhang_part14 +PREHOOK: type: QUERY +POSTHOOK: query: explain select key from nzhang_part14 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: nzhang_part14 + Statistics: Num rows: 6 Data size: 516 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 6 Data size: 516 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + 
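The plan above reports Column stats: COMPLETE (Num rows: 6) immediately after a plain INSERT, with no explicit ANALYZE step in between. A minimal sketch of the statements that reproduce this behavior, assuming the hive.stats.column.autogather flag introduced by this patch is enabled for the session:

  set hive.stats.column.autogather=true;

  create table if not exists nzhang_part14 (key string)
    partitioned by (value string);

  -- a plain dynamic-partition INSERT; column stats are gathered as a side effect
  insert into table nzhang_part14 partition(value)
  select key, value from src limit 6;

  -- the optimizer now sees COMPLETE column stats without ANALYZE ... FOR COLUMNS
  explain select key from nzhang_part14;
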
+PREHOOK: query: drop table src5 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table src5 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table src5 as select key, value from src limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src5 +POSTHOOK: query: create table src5 as select key, value from src limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src5 +POSTHOOK: Lineage: src5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert into table nzhang_part14 partition(value) +select key, value from src5 +PREHOOK: type: QUERY +PREHOOK: Input: default@src5 +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: insert into table nzhang_part14 partition(value) +select key, value from src5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src5 +POSTHOOK: Output: default@nzhang_part14@value=val_165 +POSTHOOK: Output: default@nzhang_part14@value=val_238 +POSTHOOK: Output: default@nzhang_part14@value=val_27 +POSTHOOK: Output: default@nzhang_part14@value=val_311 +POSTHOOK: Output: default@nzhang_part14@value=val_86 +POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_165).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_238).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_27).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_311).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_86).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: explain select key from nzhang_part14 +PREHOOK: type: QUERY +POSTHOOK: query: explain select key from nzhang_part14 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: nzhang_part14 + Statistics: Num rows: 11 Data size: 946 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 11 Data size: 946 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: drop table alter5 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table alter5 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@alter5 +POSTHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@alter5 +PREHOOK: query: alter table alter5 add partition (dt='a') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@alter5 +POSTHOOK: query: alter table alter5 add partition (dt='a') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@alter5 +POSTHOOK: Output: default@alter5@dt=a +PREHOOK: query: describe formatted alter5 partition (dt='a') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter5 +POSTHOOK: query: describe formatted alter5 
partition (dt='a') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter5 +# col_name data_type comment + +col1 string + +# Partition Information +# col_name data_type comment + +dt string + +# Detailed Partition Information +Partition Value: [a] +Database: default +Table: alter5 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + numFiles 0 + numRows 0 + rawDataSize 0 + totalSize 0 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: insert into table alter5 partition (dt='a') select key from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@alter5@dt=a +POSTHOOK: query: insert into table alter5 partition (dt='a') select key from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@alter5@dt=a +POSTHOOK: Lineage: alter5 PARTITION(dt=a).col1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: describe formatted alter5 partition (dt='a') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter5 +POSTHOOK: query: describe formatted alter5 partition (dt='a') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter5 +# col_name data_type comment + +col1 string + +# Partition Information +# col_name data_type comment + +dt string + +# Detailed Partition Information +Partition Value: [a] +Database: default +Table: alter5 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"col1\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 500 + rawDataSize 1406 + totalSize 1906 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain select * from alter5 where dt='a' +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from alter5 where dt='a' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: alter5 + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: col1 (type: string), 'a' (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 86000 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: drop table alter5 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@alter5 +PREHOOK: Output: default@alter5 +POSTHOOK: query: drop table alter5 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@alter5 +POSTHOOK: Output: default@alter5 +PREHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@alter5 +POSTHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: 
default@alter5 +PREHOOK: query: alter table alter5 add partition (dt='a') location 'parta' +PREHOOK: type: ALTERTABLE_ADDPARTS +#### A masked pattern was here #### +PREHOOK: Output: default@alter5 +POSTHOOK: query: alter table alter5 add partition (dt='a') location 'parta' +POSTHOOK: type: ALTERTABLE_ADDPARTS +#### A masked pattern was here #### +POSTHOOK: Output: default@alter5 +POSTHOOK: Output: default@alter5@dt=a +PREHOOK: query: describe formatted alter5 partition (dt='a') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter5 +POSTHOOK: query: describe formatted alter5 partition (dt='a') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter5 +# col_name data_type comment + +col1 string + +# Partition Information +# col_name data_type comment + +dt string + +# Detailed Partition Information +Partition Value: [a] +Database: default +Table: alter5 +#### A masked pattern was here #### +Partition Parameters: +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: insert into table alter5 partition (dt='a') select key from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@alter5@dt=a +POSTHOOK: query: insert into table alter5 partition (dt='a') select key from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@alter5@dt=a +POSTHOOK: Lineage: alter5 PARTITION(dt=a).col1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: describe formatted alter5 partition (dt='a') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter5 +POSTHOOK: query: describe formatted alter5 partition (dt='a') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter5 +# col_name data_type comment + +col1 string + +# Partition Information +# col_name data_type comment + +dt string + +# Detailed Partition Information +Partition Value: [a] +Database: default +Table: alter5 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"col1\":\"true\"}} + numFiles 1 + totalSize 1906 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain select * from alter5 where dt='a' +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from alter5 where dt='a' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: alter5 + Statistics: Num rows: 19 Data size: 1653 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: col1 (type: string), 'a' (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 19 Data size: 3268 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: drop table src_stat_part +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table src_stat_part +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table src_stat_part(key 
string, value string) partitioned by (partitionId int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@src_stat_part +POSTHOOK: query: create table src_stat_part(key string, value string) partitioned by (partitionId int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src_stat_part +PREHOOK: query: insert into table src_stat_part partition (partitionId=1) +select * from src1 limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +PREHOOK: Output: default@src_stat_part@partitionid=1 +POSTHOOK: query: insert into table src_stat_part partition (partitionId=1) +select * from src1 limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +POSTHOOK: Output: default@src_stat_part@partitionid=1 +POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=1).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=1).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted src_stat_part PARTITION(partitionId=1) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@src_stat_part +POSTHOOK: query: describe formatted src_stat_part PARTITION(partitionId=1) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@src_stat_part +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +partitionid int + +# Detailed Partition Information +Partition Value: [1] +Database: default +Table: src_stat_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 5 + rawDataSize 38 + totalSize 43 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: insert into table src_stat_part partition (partitionId=2) +select * from src1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +PREHOOK: Output: default@src_stat_part@partitionid=2 +POSTHOOK: query: insert into table src_stat_part partition (partitionId=2) +select * from src1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +POSTHOOK: Output: default@src_stat_part@partitionid=2 +POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted src_stat_part PARTITION(partitionId=2) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@src_stat_part +POSTHOOK: query: describe formatted src_stat_part PARTITION(partitionId=2) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@src_stat_part +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +partitionid int + +# Detailed Partition Information +Partition Value: [2] +Database: default +Table: src_stat_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + 
numFiles 1 + numRows 25 + rawDataSize 191 + totalSize 216 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: drop table srcbucket_mapjoin +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table srcbucket_mapjoin +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcbucket_mapjoin +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcbucket_mapjoin +PREHOOK: query: drop table tab_part +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table tab_part +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tab_part +POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tab_part +PREHOOK: query: drop table srcbucket_mapjoin_part +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table srcbucket_mapjoin_part +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcbucket_mapjoin_part +PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin +POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin +POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE 
srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: insert into table tab_part partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket_mapjoin_part +PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: Output: default@tab_part@ds=2008-04-08 +POSTHOOK: query: insert into table tab_part partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin_part +POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: Output: default@tab_part@ds=2008-04-08 +POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: describe formatted tab_part partition (ds='2008-04-08') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@tab_part +POSTHOOK: query: describe formatted tab_part partition (ds='2008-04-08') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@tab_part +# col_name data_type comment + +key int +value string + +# Partition Information +# col_name data_type comment + +ds string + +# Detailed Partition Information +Partition Value: [2008-04-08] +Database: default +Table: tab_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE 
{\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 4 + numRows 500 + rawDataSize 5312 + totalSize 5812 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: 4 +Bucket Columns: [key] +Sort Columns: [Order(col:key, order:1)] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tab +POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tab +PREHOOK: query: insert into table tab partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket_mapjoin +PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: Output: default@tab@ds=2008-04-08 +POSTHOOK: query: insert into table tab partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin +POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 +POSTHOOK: Output: default@tab@ds=2008-04-08 +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: describe formatted tab partition (ds='2008-04-08') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@tab +POSTHOOK: query: describe formatted tab partition (ds='2008-04-08') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@tab +# col_name data_type comment + +key int +value string + +# Partition Information +# col_name data_type comment + +ds string + +# Detailed Partition Information +Partition Value: [2008-04-08] +Database: default +Table: tab +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 2 + numRows 242 + rawDataSize 2566 + totalSize 2808 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: 2 +Bucket Columns: [key] +Sort Columns: [Order(col:key, order:1)] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: drop table nzhang_part14 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@nzhang_part14 +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: drop table nzhang_part14 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@nzhang_part14 +POSTHOOK: Output: default@nzhang_part14 +PREHOOK: query: create table if not exists nzhang_part14 (key string, value string) + partitioned by (ds string, hr string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: create table 
if not exists nzhang_part14 (key string, value string) + partitioned by (ds string, hr string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@nzhang_part14 +PREHOOK: query: describe formatted nzhang_part14 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part14 +POSTHOOK: query: describe formatted nzhang_part14 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@nzhang_part14 +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +ds string +hr string + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: insert into table nzhang_part14 partition(ds, hr) +select key, value, ds, hr from ( + select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a + union all + select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c +) T +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: insert into table nzhang_part14 partition(ds, hr) +select key, value, ds, hr from ( + select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a + union all + select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c +) T +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@nzhang_part14@ds=1/hr=2 +POSTHOOK: Output: default@nzhang_part14@ds=1/hr=3 +POSTHOOK: Output: default@nzhang_part14@ds=2/hr=1 +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).value EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).value EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).value EXPRESSION [] +PREHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part14 +POSTHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@nzhang_part14 +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +ds string +hr string + +# Detailed Partition Information +Partition Value: [1, 3] +Database: default +Table: nzhang_part14 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 2 + rawDataSize 6 + totalSize 8 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr) +SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@nzhang_part14@ds=2010-03-03 +POSTHOOK: query: INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr) +SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=11 +POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=12 +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part14 +POSTHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@nzhang_part14 +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +ds string +hr string + +# Detailed Partition Information +Partition Value: [2010-03-03, 12] +Database: default +Table: nzhang_part14 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 1000 + rawDataSize 10624 + totalSize 11624 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: drop table nzhang_part14 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@nzhang_part14 +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: drop table nzhang_part14 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@nzhang_part14 +POSTHOOK: Output: default@nzhang_part14 +PREHOOK: query: create table if not exists nzhang_part14 (key string, value string) +partitioned by (ds string, hr string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: 
database:default +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: create table if not exists nzhang_part14 (key string, value string) +partitioned by (ds string, hr string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@nzhang_part14 +PREHOOK: query: INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr) +SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@nzhang_part14@ds=2010-03-03 +POSTHOOK: query: INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr) +SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=11 +POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=12 +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part14 +POSTHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@nzhang_part14 +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +ds string +hr string + +# Detailed Partition Information +Partition Value: [2010-03-03, 12] +Database: default +Table: nzhang_part14 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 1000 + rawDataSize 10624 + totalSize 11624 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: drop table a +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@a +PREHOOK: Output: default@a +POSTHOOK: query: drop table a +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@a +POSTHOOK: Output: default@a +PREHOOK: query: create table a (key string, value string) +partitioned by (ds string, hr string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@a +POSTHOOK: query: create table a (key string, value string) +partitioned by 
(ds string, hr string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@a +PREHOOK: query: drop table b +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@b +PREHOOK: Output: default@b +POSTHOOK: query: drop table b +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@b +POSTHOOK: Output: default@b +PREHOOK: query: create table b (key string, value string) +partitioned by (ds string, hr string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@b +POSTHOOK: query: create table b (key string, value string) +partitioned by (ds string, hr string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@b +PREHOOK: query: drop table c +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table c +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table c (key string, value string) +partitioned by (ds string, hr string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@c +POSTHOOK: query: create table c (key string, value string) +partitioned by (ds string, hr string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@c +PREHOOK: query: FROM srcpart +INSERT into TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10 +INSERT into TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11 +INSERT into TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@a@ds=2010-03-11 +PREHOOK: Output: default@b@ds=2010-04-11 +PREHOOK: Output: default@c@ds=2010-05-11 +POSTHOOK: query: FROM srcpart +INSERT into TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10 +INSERT into TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11 +INSERT into TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@a@ds=2010-03-11/hr=11 +POSTHOOK: Output: default@a@ds=2010-03-11/hr=12 +POSTHOOK: Output: default@b@ds=2010-04-11/hr=12 +POSTHOOK: Output: default@c@ds=2010-05-11/hr=11 +POSTHOOK: Output: default@c@ds=2010-05-11/hr=12 +POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: b PARTITION(ds=2010-04-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: b PARTITION(ds=2010-04-11,hr=12).value SIMPLE 
[(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain select key from a +PREHOOK: type: QUERY +POSTHOOK: query: explain select key from a +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: a + Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: explain select value from b +PREHOOK: type: QUERY +POSTHOOK: query: explain select value from b +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: b + Statistics: Num rows: 1000 Data size: 91000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: value (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1000 Data size: 91000 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: explain select key from b +PREHOOK: type: QUERY +POSTHOOK: query: explain select key from b +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: b + Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: explain select value from c +PREHOOK: type: QUERY +POSTHOOK: query: explain select value from c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: c + Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: value (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: explain select key from c +PREHOOK: type: QUERY +POSTHOOK: query: explain select key from c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: c + Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + diff --git a/ql/src/test/results/clientpositive/autoColumnStats_3.q.out 
b/ql/src/test/results/clientpositive/autoColumnStats_3.q.out new file mode 100644 index 0000000..ee41910 --- /dev/null +++ b/ql/src/test/results/clientpositive/autoColumnStats_3.q.out @@ -0,0 +1,420 @@ +PREHOOK: query: drop table src_multi1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table src_multi1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table src_multi1 like src +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@src_multi1 +POSTHOOK: query: create table src_multi1 like src +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src_multi1 +PREHOOK: query: analyze table src_multi1 compute statistics for columns key +PREHOOK: type: QUERY +PREHOOK: Input: default@src_multi1 +#### A masked pattern was here #### +POSTHOOK: query: analyze table src_multi1 compute statistics for columns key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_multi1 +#### A masked pattern was here #### +PREHOOK: query: describe formatted src_multi1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@src_multi1 +POSTHOOK: query: describe formatted src_multi1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@src_multi1 +# col_name data_type comment + +key string default +value string default + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 0 + numRows 0 + rawDataSize 0 + totalSize 0 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: insert into table src_multi1 select * from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@src_multi1 +POSTHOOK: query: insert into table src_multi1 select * from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@src_multi1 +POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted src_multi1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@src_multi1 +POSTHOOK: query: describe formatted src_multi1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@src_multi1 +# col_name data_type comment + +key string default +value string default + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 500 + rawDataSize 5312 + totalSize 5812 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: 
drop table nzhang_part14 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table nzhang_part14 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table if not exists nzhang_part14 (key string, value string) + partitioned by (ds string, hr string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: create table if not exists nzhang_part14 (key string, value string) + partitioned by (ds string, hr string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@nzhang_part14 +PREHOOK: query: describe formatted nzhang_part14 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part14 +POSTHOOK: query: describe formatted nzhang_part14 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@nzhang_part14 +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +ds string +hr string + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: insert into table nzhang_part14 partition(ds, hr) +select key, value, ds, hr from ( + select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a + union all + select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c +) T +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: insert into table nzhang_part14 partition(ds, hr) +select key, value, ds, hr from ( + select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a + union all + select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c +) T +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@nzhang_part14@ds=1/hr=2 +POSTHOOK: Output: default@nzhang_part14@ds=1/hr=3 +POSTHOOK: Output: default@nzhang_part14@ds=2/hr=1 +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).value EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).value EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).value EXPRESSION [] +PREHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part14 +POSTHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@nzhang_part14 +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +ds string +hr string + +# Detailed Partition Information 
+Partition Value: [1, 3] +Database: default +Table: nzhang_part14 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 2 + rawDataSize 6 + totalSize 8 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: analyze table nzhang_part14 partition(ds='1', hr='3') compute statistics for columns value +PREHOOK: type: QUERY +PREHOOK: Input: default@nzhang_part14 +PREHOOK: Input: default@nzhang_part14@ds=1/hr=3 +#### A masked pattern was here #### +POSTHOOK: query: analyze table nzhang_part14 partition(ds='1', hr='3') compute statistics for columns value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@nzhang_part14 +POSTHOOK: Input: default@nzhang_part14@ds=1/hr=3 +#### A masked pattern was here #### +PREHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part14 +POSTHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@nzhang_part14 +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +ds string +hr string + +# Detailed Partition Information +Partition Value: [1, 3] +Database: default +Table: nzhang_part14 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 2 + rawDataSize 6 + totalSize 8 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: desc formatted nzhang_part14 partition(ds='2', hr='1') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part14 +POSTHOOK: query: desc formatted nzhang_part14 partition(ds='2', hr='1') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@nzhang_part14 +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +ds string +hr string + +# Detailed Partition Information +Partition Value: [2, 1] +Database: default +Table: nzhang_part14 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 2 + rawDataSize 8 + totalSize 10 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: insert into table nzhang_part14 partition(ds, hr) +select key, value, ds, hr from ( + select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a + union all + select * from (select 'k2' as key, '' as value, 
'1' as ds, '3' as hr from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c +) T +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: insert into table nzhang_part14 partition(ds, hr) +select key, value, ds, hr from ( + select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a + union all + select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c +) T +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@nzhang_part14@ds=1/hr=2 +POSTHOOK: Output: default@nzhang_part14@ds=1/hr=3 +POSTHOOK: Output: default@nzhang_part14@ds=2/hr=1 +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).value EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).value EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).value EXPRESSION [] +PREHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part14 +POSTHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@nzhang_part14 +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +ds string +hr string + +# Detailed Partition Information +Partition Value: [1, 3] +Database: default +Table: nzhang_part14 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 2 + numRows 4 + rawDataSize 12 + totalSize 16 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: desc formatted nzhang_part14 partition(ds='2', hr='1') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part14 +POSTHOOK: query: desc formatted nzhang_part14 partition(ds='2', hr='1') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@nzhang_part14 +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +ds string +hr string + +# Detailed Partition Information +Partition Value: [2, 1] +Database: default +Table: nzhang_part14 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 2 + numRows 4 + rawDataSize 16 + totalSize 20 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 diff --git 
a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out new file mode 100644 index 0000000..0c6074a --- /dev/null +++ b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out @@ -0,0 +1,260 @@ +PREHOOK: query: create table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@acid_dtt +POSTHOOK: query: create table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@acid_dtt +PREHOOK: query: desc formatted acid_dtt +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@acid_dtt +POSTHOOK: query: desc formatted acid_dtt +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@acid_dtt +# col_name data_type comment + +a int +b varchar(128) + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + numFiles 0 + numRows 0 + rawDataSize 0 + totalSize 0 + transactional true +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: 2 +Bucket Columns: [a] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain insert into table acid_dtt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10 +PREHOOK: type: QUERY +POSTHOOK: query: explain insert into table acid_dtt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-5 depends on stages: Stage-4, Stage-3 + Stage-4 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: cint is not null (type: boolean) + Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int), CAST( cstring1 AS varchar(128)) (type: varchar(128)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: varchar(128)) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: varchar(128)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 2150 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 10 Data size: 2150 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: int), _col1 (type: varchar(128)) + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: int), VALUE._col1 (type: varchar(128)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 10 Data size: 2150 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 2150 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid_dtt + Select Operator + expressions: _col0 (type: int), _col1 (type: varchar(128)) + outputColumnNames: a, b + Statistics: Num rows: 10 Data size: 2150 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: compute_stats(a, 16), compute_stats(b, 16) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid_dtt + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-5 + Column Stats Work + Column Stats Desc: + Columns: a, b + Column Types: int, varchar(128) + Table: default.acid_dtt + + Stage: Stage-4 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + value expressions: _col0 (type: struct), _col1 (type: struct) + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + +PREHOOK: query: insert into table acid_dtt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: default@acid_dtt +POSTHOOK: query: insert into table acid_dtt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: default@acid_dtt +POSTHOOK: Lineage: acid_dtt.a SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: acid_dtt.b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] +PREHOOK: query: desc formatted acid_dtt +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@acid_dtt +POSTHOOK: query: desc formatted acid_dtt +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@acid_dtt +# col_name data_type comment + +a int +b varchar(128) + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 2 + numRows 10 + rawDataSize 0 + totalSize 1714 + transactional true +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: 2 +Bucket Columns: [a] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: delete from acid_dtt where b = '0ruyd6Y50JpdGRf6HqD' or b = '2uLyD28144vklju213J1mr' +PREHOOK: type: QUERY +PREHOOK: Input: default@acid_dtt +PREHOOK: Output: default@acid_dtt +POSTHOOK: query: delete from acid_dtt where b = '0ruyd6Y50JpdGRf6HqD' or b = '2uLyD28144vklju213J1mr' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid_dtt +POSTHOOK: Output: default@acid_dtt +PREHOOK: query: desc formatted acid_dtt +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@acid_dtt +POSTHOOK: query: desc formatted acid_dtt +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@acid_dtt +# col_name data_type comment + +a int +b varchar(128) + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + numFiles 4 + numRows 8 + rawDataSize 0 + totalSize 2719 + transactional true +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: 2 +Bucket Columns: [a] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 diff --git a/ql/src/test/results/clientpositive/autoColumnStats_5.q.out b/ql/src/test/results/clientpositive/autoColumnStats_5.q.out new file mode 100644 index 0000000..32372cb --- /dev/null +++ b/ql/src/test/results/clientpositive/autoColumnStats_5.q.out @@ -0,0 +1,378 @@ +PREHOOK: query: -- SORT_QUERY_RESULTS +-- +-- FILE VARIATION: TEXT, Non-Vectorized, MapWork, Partitioned +-- +-- +-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT +--- +CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@partitioned1 +POSTHOOK: query: -- SORT_QUERY_RESULTS +-- +-- FILE VARIATION: TEXT, Non-Vectorized, MapWork, Partitioned +-- +-- +-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... 
STATIC INSERT +--- +CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@partitioned1 +PREHOOK: query: explain insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original') +PREHOOK: type: QUERY +POSTHOOK: query: explain insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original') +POSTHOOK: type: QUERY +Explain +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 + Stage-4 + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 + Stage-2 depends on stages: Stage-0 + Stage-8 depends on stages: Stage-2 + Stage-3 + Stage-5 + Stage-6 depends on stages: Stage-5 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: values__tmp__table__1 + Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.partitioned1 + Select Operator + expressions: _col0 (type: int), _col1 (type: string), 1 (type: int) + outputColumnNames: a, b, part + Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: compute_stats(a, 16), compute_stats(b, 16) + keys: 1 (type: int) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: 1 (type: int) + sort order: + + Map-reduce partition columns: 1 (type: int) + Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: struct), _col2 (type: struct) + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + keys: 1 (type: int) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col1 (type: struct), _col2 (type: struct), 1 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-7 + Conditional Operator + + Stage: Stage-4 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Move Operator + tables: + partition: + part 1 + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.partitioned1 + + Stage: Stage-2 + Stats-Aggr Operator + + Stage: Stage-8 + Column Stats Work + Column Stats Desc: + Columns: a, b + Column Types: int, string + Table: default.partitioned1 + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.partitioned1 + + Stage: Stage-5 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.partitioned1 + + Stage: Stage-6 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original') +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@partitioned1@part=1 +POSTHOOK: query: insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@partitioned1@part=1 +POSTHOOK: Lineage: partitioned1 PARTITION(part=1).a EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: partitioned1 PARTITION(part=1).b SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +_col0 _col1 +PREHOOK: query: -- Table-Non-Cascade ADD COLUMNS ... +alter table partitioned1 add columns(c int, d string) +PREHOOK: type: ALTERTABLE_ADDCOLS +PREHOOK: Input: default@partitioned1 +PREHOOK: Output: default@partitioned1 +POSTHOOK: query: -- Table-Non-Cascade ADD COLUMNS ... 
+alter table partitioned1 add columns(c int, d string) +POSTHOOK: type: ALTERTABLE_ADDCOLS +POSTHOOK: Input: default@partitioned1 +POSTHOOK: Output: default@partitioned1 +PREHOOK: query: explain insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty') +PREHOOK: type: QUERY +POSTHOOK: query: explain insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty') +POSTHOOK: type: QUERY +Explain +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 + Stage-4 + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 + Stage-2 depends on stages: Stage-0 + Stage-3 + Stage-5 + Stage-6 depends on stages: Stage-5 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: values__tmp__table__3 + Statistics: Num rows: 1 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string), UDFToInteger(tmp_values_col3) (type: int), tmp_values_col4 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 60 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 60 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.partitioned1 + + Stage: Stage-7 + Conditional Operator + + Stage: Stage-4 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Move Operator + tables: + partition: + part 2 + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.partitioned1 + + Stage: Stage-2 + Stats-Aggr Operator + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.partitioned1 + + Stage: Stage-5 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.partitioned1 + + Stage: Stage-6 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty') +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__4 +PREHOOK: Output: default@partitioned1@part=2 +POSTHOOK: query: insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__4 +POSTHOOK: Output: 
default@partitioned1@part=2 +POSTHOOK: Lineage: partitioned1 PARTITION(part=2).a EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: partitioned1 PARTITION(part=2).b SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +POSTHOOK: Lineage: partitioned1 PARTITION(part=2).c EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col3, type:string, comment:), ] +POSTHOOK: Lineage: partitioned1 PARTITION(part=2).d SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col4, type:string, comment:), ] +_col0 _col1 _col2 _col3 +PREHOOK: query: explain insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred') +PREHOOK: type: QUERY +POSTHOOK: query: explain insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred') +POSTHOOK: type: QUERY +Explain +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 + Stage-4 + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 + Stage-2 depends on stages: Stage-0 + Stage-3 + Stage-5 + Stage-6 depends on stages: Stage-5 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: values__tmp__table__5 + Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string), UDFToInteger(tmp_values_col3) (type: int), tmp_values_col4 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.partitioned1 + + Stage: Stage-7 + Conditional Operator + + Stage: Stage-4 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Move Operator + tables: + partition: + part 1 + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.partitioned1 + + Stage: Stage-2 + Stats-Aggr Operator + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.partitioned1 + + Stage: Stage-5 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.partitioned1 + + Stage: Stage-6 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: insert into table partitioned1 partition(part=1) 
values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred') +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__6 +PREHOOK: Output: default@partitioned1@part=1 +POSTHOOK: query: insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__6 +POSTHOOK: Output: default@partitioned1@part=1 +POSTHOOK: Lineage: partitioned1 PARTITION(part=1).a EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: partitioned1 PARTITION(part=1).b SIMPLE [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +POSTHOOK: Lineage: partitioned1 PARTITION(part=1).c EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col3, type:string, comment:), ] +POSTHOOK: Lineage: partitioned1 PARTITION(part=1).d SIMPLE [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col4, type:string, comment:), ] +_col0 _col1 _col2 _col3 diff --git a/ql/src/test/results/clientpositive/autoColumnStats_6.q.out b/ql/src/test/results/clientpositive/autoColumnStats_6.q.out new file mode 100644 index 0000000..4cf32b2 --- /dev/null +++ b/ql/src/test/results/clientpositive/autoColumnStats_6.q.out @@ -0,0 +1,299 @@ +PREHOOK: query: DROP TABLE orcfile_merge2a +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE orcfile_merge2a +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE orcfile_merge2a (key INT, value STRING) + PARTITIONED BY (one string, two string, three string) + STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orcfile_merge2a +POSTHOOK: query: CREATE TABLE orcfile_merge2a (key INT, value STRING) + PARTITIONED BY (one string, two string, three string) + STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orcfile_merge2a +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three) + SELECT key, value, PMOD(HASH(key), 10) as two, + PMOD(HASH(value), 10) as three + FROM src +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three) + SELECT key, value, PMOD(HASH(key), 10) as two, + PMOD(HASH(value), 10) as three + FROM src +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 + Stage-4 + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 + Stage-2 depends on stages: Stage-0 + Stage-8 depends on stages: Stage-2 + Stage-3 + Stage-5 + Stage-6 depends on stages: Stage-5 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 10) (type: int), (hash(value) pmod 10) (type: int) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: 
org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.orcfile_merge2a + Select Operator + expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), org.apache.hadoop.hive.ql.udf.UDFToString(_col2) (type: string), org.apache.hadoop.hive.ql.udf.UDFToString(_col3) (type: string) + outputColumnNames: key, value, one, two, three + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: compute_stats(key, 16), compute_stats(value, 16) + keys: '1' (type: string), two (type: string), three (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: '1' (type: string), _col1 (type: string), _col2 (type: string) + sort order: +++ + Map-reduce partition columns: '1' (type: string), _col1 (type: string), _col2 (type: string) + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + value expressions: _col3 (type: struct), _col4 (type: struct) + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + keys: '1' (type: string), KEY._col1 (type: string), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col3 (type: struct), _col4 (type: struct), '1' (type: string), _col1 (type: string), _col2 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-7 + Conditional Operator + + Stage: Stage-4 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Move Operator + tables: + partition: + one 1 + three + two + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.orcfile_merge2a + + Stage: Stage-2 + Stats-Aggr Operator + + Stage: Stage-8 + Column Stats Work + Column Stats Desc: + Columns: key, value + Column Types: int, string + Table: default.orcfile_merge2a + + Stage: Stage-3 + Merge File Operator + Map Operator Tree: + ORC File Merge Operator + merge level: stripe + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + + Stage: Stage-5 + Merge File Operator + Map Operator Tree: + ORC File Merge Operator + merge level: stripe + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + + Stage: Stage-6 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three) + SELECT key, value, PMOD(HASH(key), 10) as two, + PMOD(HASH(value), 10) as three + FROM src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@orcfile_merge2a@one=1 +POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three) + 
SELECT key, value, PMOD(HASH(key), 10) as two, + PMOD(HASH(value), 10) as three + FROM src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=0/three=2 +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=0/three=8 +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=1/three=3 +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=1/three=9 +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=2/three=0 +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=2/three=4 +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=3/three=1 +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=3/three=5 +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=4/three=2 +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=4/three=6 +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=5/three=3 +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=5/three=7 +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=6/three=4 +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=6/three=8 +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=7/three=5 +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=7/three=9 +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=8/three=0 +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=8/three=6 +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=9/three=1 +POSTHOOK: Output: default@orcfile_merge2a@one=1/two=9/three=7 +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=8).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=9).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=4).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=5).key EXPRESSION 
[(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=5).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=6).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=6).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=7).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=7).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=4).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=8).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=5).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=9).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=6).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=6).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=7).key EXPRESSION 
[(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=7).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +Found 1 items +#### A masked pattern was here #### +PREHOOK: query: SELECT SUM(HASH(c)) FROM ( + SELECT TRANSFORM(*) USING 'tr \t _' AS (c) + FROM orcfile_merge2a +) t +PREHOOK: type: QUERY +PREHOOK: Input: default@orcfile_merge2a +PREHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=2 +PREHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=8 +PREHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=3 +PREHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=9 +PREHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=0 +PREHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=4 +PREHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=1 +PREHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=5 +PREHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=2 +PREHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=6 +PREHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=3 +PREHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=7 +PREHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=4 +PREHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=8 +PREHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=5 +PREHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=9 +PREHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=0 +PREHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=6 +PREHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=1 +PREHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=7 +#### A masked pattern was here #### +POSTHOOK: query: SELECT SUM(HASH(c)) FROM ( + SELECT TRANSFORM(*) USING 'tr \t _' AS (c) + FROM orcfile_merge2a +) t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orcfile_merge2a +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=2 +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=8 +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=3 +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=9 +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=0 +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=4 +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=1 +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=5 +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=2 +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=6 +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=3 +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=7 +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=4 +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=8 +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=5 +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=9 +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=0 +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=6 +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=1 +POSTHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=7 +#### A masked pattern was here #### +-4209012844 +PREHOOK: query: SELECT SUM(HASH(c)) FROM ( + SELECT TRANSFORM(key, value, '1', PMOD(HASH(key), 10), + PMOD(HASH(value), 10)) USING 'tr \t _' AS (c) + FROM src +) t +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT SUM(HASH(c)) FROM ( + SELECT TRANSFORM(key, value, '1', PMOD(HASH(key), 10), + PMOD(HASH(value), 10)) 
USING 'tr \t _' AS (c) + FROM src +) t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +-4209012844 +PREHOOK: query: DROP TABLE orcfile_merge2a +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@orcfile_merge2a +PREHOOK: Output: default@orcfile_merge2a +POSTHOOK: query: DROP TABLE orcfile_merge2a +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@orcfile_merge2a +POSTHOOK: Output: default@orcfile_merge2a diff --git a/ql/src/test/results/clientpositive/autoColumnStats_7.q.out b/ql/src/test/results/clientpositive/autoColumnStats_7.q.out new file mode 100644 index 0000000..9422d65 --- /dev/null +++ b/ql/src/test/results/clientpositive/autoColumnStats_7.q.out @@ -0,0 +1,216 @@ +PREHOOK: query: -- Taken from groupby2.q +CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@dest_g2 +POSTHOOK: query: -- Taken from groupby2.q +CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@dest_g2 +PREHOOK: query: CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src_temp +POSTHOOK: query: CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src_temp +PREHOOK: query: explain FROM src_temp +INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1) +PREHOOK: type: QUERY +POSTHOOK: query: explain FROM src_temp +INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-6 depends on stages: Stage-3, Stage-5 + Stage-4 depends on stages: Stage-2 + Stage-5 depends on stages: Stage-4 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src_temp + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: substr(key, 1, 1) (type: string), substr(value, 5) (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Group By Operator + aggregations: count(DISTINCT KEY._col1:0._col0), sum(KEY._col1:0._col0) + keys: KEY._col0 (type: string) + mode: partial1 + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint), _col2 (type: double) + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0), sum(VALUE._col1) + keys: KEY._col0 (type: string) + mode: final + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), UDFToInteger(_col1) (type: int), concat(_col0, _col2) (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest_g2 + Select Operator + expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) + outputColumnNames: key, c1, c2 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest_g2 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-6 + Column Stats Work + Column Stats Desc: + Columns: key, c1, c2 + Column Types: string, int, string + Table: default.dest_g2 + + Stage: Stage-4 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + sort order: + Map-reduce partition columns: rand() (type: double) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + value expressions: key (type: string), 16 (type: int), c1 (type: int), c2 (type: string) + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0, 16), compute_stats(VALUE._col2, 16), compute_stats(VALUE._col3, 16) + mode: partial1 + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-5 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: struct), _col1 (type: struct), _col2 (type: struct) + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2) + mode: final + 
outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + +PREHOOK: query: FROM src_temp +INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_temp +PREHOOK: Output: default@dest_g2 +POSTHOOK: query: FROM src_temp +INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_temp +POSTHOOK: Output: default@dest_g2 +POSTHOOK: Lineage: dest_g2.c1 EXPRESSION [(src_temp)src_temp.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: dest_g2.c2 EXPRESSION [(src_temp)src_temp.FieldSchema(name:key, type:string, comment:null), (src_temp)src_temp.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: dest_g2.key EXPRESSION [(src_temp)src_temp.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT dest_g2.* FROM dest_g2 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest_g2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT dest_g2.* FROM dest_g2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest_g2 +#### A masked pattern was here #### +0 1 00.0 +1 71 116414.0 +2 69 225571.0 +3 62 332004.0 +4 74 452763.0 +5 6 5397.0 +6 5 6398.0 +7 6 7735.0 +8 8 8762.0 +9 7 91047.0 +PREHOOK: query: DROP TABLE dest_g2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@dest_g2 +PREHOOK: Output: default@dest_g2 +POSTHOOK: query: DROP TABLE dest_g2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@dest_g2 +POSTHOOK: Output: default@dest_g2 +PREHOOK: query: DROP TABLE src_temp +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@src_temp +PREHOOK: Output: default@src_temp +POSTHOOK: query: DROP TABLE src_temp +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@src_temp +POSTHOOK: Output: default@src_temp diff --git a/ql/src/test/results/clientpositive/autoColumnStats_8.q.out b/ql/src/test/results/clientpositive/autoColumnStats_8.q.out new file mode 100644 index 0000000..2b9230d --- /dev/null +++ b/ql/src/test/results/clientpositive/autoColumnStats_8.q.out @@ -0,0 +1,2624 @@ +PREHOOK: query: -- SORT_QUERY_RESULTS + +show partitions srcpart +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@srcpart +POSTHOOK: query: -- SORT_QUERY_RESULTS + +show partitions srcpart +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@srcpart +ds=2008-04-08/hr=11 +ds=2008-04-08/hr=12 +ds=2008-04-09/hr=11 +ds=2008-04-09/hr=12 +PREHOOK: query: create table if not exists nzhang_part8 like srcpart +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_part8 +POSTHOOK: query: create table if not exists nzhang_part8 like srcpart +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@nzhang_part8 +PREHOOK: query: describe extended nzhang_part8 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part8 
+POSTHOOK: query: describe extended nzhang_part8
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part8
+key string default
+value string default
+ds string
+hr string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+hr string
+
+#### A masked pattern was here ####
+PREHOOK: query: explain extended
+from srcpart
+insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+from srcpart
+insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+  Stage-6 depends on stages: Stage-5, Stage-3, Stage-4
+  Stage-7 depends on stages: Stage-5, Stage-3, Stage-4
+  Stage-1 depends on stages: Stage-2
+  Stage-4 depends on stages: Stage-1
+  Stage-5 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: srcpart
+            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: (ds <= '2008-04-08') (type: boolean)
+              Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 1
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 1
+                  Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        bucket_count -1
+                        columns key,value
+                        columns.comments 'default','default'
+                        columns.types string:string
+#### A masked pattern was here ####
+                        name default.nzhang_part8
+                        partition_columns ds/hr
+                        partition_columns.types string:string
+                        serialization.ddl struct nzhang_part8 { string key, string value}
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.nzhang_part8
+                  TotalFiles: 1
+                  GatherStats: true
+                  MultiFileSpray: false
+              Select Operator
+                expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+                outputColumnNames: key, value, ds, hr
+                Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: compute_stats(key, 16), compute_stats(value, 16)
+                  keys: ds (type: string), hr (type: string)
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string), _col1 (type: string)
+                    null sort order: aa
+                    sort order: ++
+                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                    Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+                    tag: -1
+                    value expressions: _col2 (type: struct), _col3 (type: struct)
+                    auto parallelism: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: (ds > '2008-04-08') (type: boolean)
+              Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string), hr (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 2
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 1
+                  Static Partition Specification: ds=2008-12-31/
+                  Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        bucket_count -1
+                        columns key,value
+                        columns.comments 'default','default'
+                        columns.types string:string
+#### A masked pattern was here ####
+                        name default.nzhang_part8
+                        partition_columns ds/hr
+                        partition_columns.types string:string
+                        serialization.ddl struct nzhang_part8 { string key, string value}
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.nzhang_part8
+                  TotalFiles: 1
+                  GatherStats: true
+                  MultiFileSpray: false
+              Select Operator
+                expressions: _col0 (type: string), _col1 (type: string), '2008-12-31' (type: string), _col2 (type: string)
+                outputColumnNames: key, value, ds, hr
+                Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: compute_stats(key, 16), compute_stats(value, 16)
+                  keys: '2008-12-31' (type: string), hr (type: string)
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2,_col3
+                          columns.types string,string,struct,struct
+                          escape.delim \
+                          serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                        serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-09
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-09
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
+        /srcpart/ds=2008-04-09/hr=11 [srcpart]
+        /srcpart/ds=2008-04-09/hr=12 [srcpart]
+      Needs Tagging: false
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+          keys: KEY._col0 (type: string), KEY._col1 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col2 (type: struct), _col3 (type: struct), _col0 (type: string), _col1 (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col0,_col1,_col2,_col3
+                    columns.types struct:struct:string:string
+                    escape.delim \
+                    hive.serialization.extend.additional.nesting.levels true
+                    serialization.escape.crlf true
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 
+            hr 
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.nzhang_part8
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct nzhang_part8 { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.nzhang_part8
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+  Stage: Stage-6
+    Column Stats Work
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: string, string
+          Table: default.nzhang_part8
+          Is Table Level Stats: false
+
+  Stage: Stage-7
+    Column Stats Work
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: string, string
+          Table: default.nzhang_part8
+          Is Table Level Stats: false
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          partition:
+            ds 2008-12-31
+            hr 
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.nzhang_part8
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct nzhang_part8 { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.nzhang_part8
+
+  Stage: Stage-4
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            GatherStats: false
+            Reduce Output Operator
+              key expressions: '2008-12-31' (type: string), _col1 (type: string)
+              null sort order: aa
+              sort order: ++
+              Map-reduce partition columns: '2008-12-31' (type: string), _col1 (type: string)
+              Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+              tag: -1
+              value expressions: _col2 (type: struct), _col3 (type: struct)
+              auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: -mr-10004
+            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+            properties:
+              columns _col0,_col1,_col2,_col3
+              columns.types string,string,struct,struct
+              escape.delim \
+              serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+          
+              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+              properties:
+                columns _col0,_col1,_col2,_col3
+                columns.types string,string,struct,struct
+                escape.delim \
+                serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+      Needs Tagging: false
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+          keys: '2008-12-31' (type: string), KEY._col1 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col2 (type: struct), _col3 (type: struct), '2008-12-31' (type: string), _col1 (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col0,_col1,_col2,_col3
+                    columns.types struct:struct:string:string
+                    escape.delim \
+                    hive.serialization.extend.additional.nesting.levels true
+                    serialization.escape.crlf true
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+
+PREHOOK: query: from srcpart
+insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@nzhang_part8
+PREHOOK: Output: default@nzhang_part8@ds=2008-12-31
+POSTHOOK: query: from srcpart
+insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@nzhang_part8@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@nzhang_part8@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@nzhang_part8@ds=2008-12-31/hr=11
+POSTHOOK: Output: default@nzhang_part8@ds=2008-12-31/hr=12
+POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part8 
PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show partitions nzhang_part8 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@nzhang_part8 +POSTHOOK: query: show partitions nzhang_part8 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@nzhang_part8 +ds=2008-04-08/hr=11 +ds=2008-04-08/hr=12 +ds=2008-12-31/hr=11 +ds=2008-12-31/hr=12 +PREHOOK: query: select * from nzhang_part8 where ds is not null and hr is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@nzhang_part8 +PREHOOK: Input: default@nzhang_part8@ds=2008-04-08/hr=11 +PREHOOK: Input: default@nzhang_part8@ds=2008-04-08/hr=12 +PREHOOK: Input: default@nzhang_part8@ds=2008-12-31/hr=11 +PREHOOK: Input: default@nzhang_part8@ds=2008-12-31/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select * from nzhang_part8 where ds is not null and hr is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@nzhang_part8 +POSTHOOK: Input: default@nzhang_part8@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@nzhang_part8@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@nzhang_part8@ds=2008-12-31/hr=11 +POSTHOOK: Input: default@nzhang_part8@ds=2008-12-31/hr=12 +#### A masked pattern was here #### +0 val_0 2008-04-08 11 +0 val_0 2008-04-08 11 +0 val_0 2008-04-08 11 +0 val_0 2008-04-08 12 +0 val_0 2008-04-08 12 +0 val_0 2008-04-08 12 +0 val_0 2008-12-31 11 +0 val_0 2008-12-31 11 +0 val_0 2008-12-31 11 +0 val_0 2008-12-31 12 +0 val_0 2008-12-31 12 +0 val_0 2008-12-31 12 +10 val_10 2008-04-08 11 +10 val_10 2008-04-08 12 +10 val_10 2008-12-31 11 +10 val_10 2008-12-31 12 +100 val_100 2008-04-08 11 +100 val_100 2008-04-08 11 +100 val_100 2008-04-08 12 +100 val_100 2008-04-08 12 +100 val_100 2008-12-31 11 +100 val_100 2008-12-31 11 +100 val_100 2008-12-31 12 +100 val_100 2008-12-31 12 +103 val_103 2008-04-08 11 +103 val_103 2008-04-08 11 +103 val_103 2008-04-08 12 +103 val_103 2008-04-08 12 +103 val_103 2008-12-31 11 +103 val_103 2008-12-31 11 +103 val_103 2008-12-31 12 +103 val_103 2008-12-31 12 +104 val_104 2008-04-08 11 +104 val_104 2008-04-08 11 +104 val_104 2008-04-08 12 +104 val_104 2008-04-08 12 +104 val_104 2008-12-31 11 +104 val_104 2008-12-31 11 +104 val_104 2008-12-31 12 +104 val_104 2008-12-31 12 +105 val_105 2008-04-08 11 +105 val_105 2008-04-08 12 +105 val_105 2008-12-31 11 +105 val_105 2008-12-31 12 +11 val_11 2008-04-08 11 +11 val_11 2008-04-08 12 +11 val_11 2008-12-31 11 +11 val_11 2008-12-31 12 +111 val_111 2008-04-08 11 +111 val_111 2008-04-08 12 +111 val_111 2008-12-31 11 +111 val_111 2008-12-31 12 +113 val_113 2008-04-08 11 +113 val_113 2008-04-08 11 +113 val_113 2008-04-08 12 +113 val_113 2008-04-08 12 +113 val_113 2008-12-31 11 +113 val_113 2008-12-31 11 +113 val_113 2008-12-31 12 +113 val_113 2008-12-31 12 +114 val_114 2008-04-08 11 +114 val_114 2008-04-08 12 +114 val_114 2008-12-31 11 +114 val_114 2008-12-31 12 +116 val_116 
2008-04-08 11 +116 val_116 2008-04-08 12 +116 val_116 2008-12-31 11 +116 val_116 2008-12-31 12 +118 val_118 2008-04-08 11 +118 val_118 2008-04-08 11 +118 val_118 2008-04-08 12 +118 val_118 2008-04-08 12 +118 val_118 2008-12-31 11 +118 val_118 2008-12-31 11 +118 val_118 2008-12-31 12 +118 val_118 2008-12-31 12 +119 val_119 2008-04-08 11 +119 val_119 2008-04-08 11 +119 val_119 2008-04-08 11 +119 val_119 2008-04-08 12 +119 val_119 2008-04-08 12 +119 val_119 2008-04-08 12 +119 val_119 2008-12-31 11 +119 val_119 2008-12-31 11 +119 val_119 2008-12-31 11 +119 val_119 2008-12-31 12 +119 val_119 2008-12-31 12 +119 val_119 2008-12-31 12 +12 val_12 2008-04-08 11 +12 val_12 2008-04-08 11 +12 val_12 2008-04-08 12 +12 val_12 2008-04-08 12 +12 val_12 2008-12-31 11 +12 val_12 2008-12-31 11 +12 val_12 2008-12-31 12 +12 val_12 2008-12-31 12 +120 val_120 2008-04-08 11 +120 val_120 2008-04-08 11 +120 val_120 2008-04-08 12 +120 val_120 2008-04-08 12 +120 val_120 2008-12-31 11 +120 val_120 2008-12-31 11 +120 val_120 2008-12-31 12 +120 val_120 2008-12-31 12 +125 val_125 2008-04-08 11 +125 val_125 2008-04-08 11 +125 val_125 2008-04-08 12 +125 val_125 2008-04-08 12 +125 val_125 2008-12-31 11 +125 val_125 2008-12-31 11 +125 val_125 2008-12-31 12 +125 val_125 2008-12-31 12 +126 val_126 2008-04-08 11 +126 val_126 2008-04-08 12 +126 val_126 2008-12-31 11 +126 val_126 2008-12-31 12 +128 val_128 2008-04-08 11 +128 val_128 2008-04-08 11 +128 val_128 2008-04-08 11 +128 val_128 2008-04-08 12 +128 val_128 2008-04-08 12 +128 val_128 2008-04-08 12 +128 val_128 2008-12-31 11 +128 val_128 2008-12-31 11 +128 val_128 2008-12-31 11 +128 val_128 2008-12-31 12 +128 val_128 2008-12-31 12 +128 val_128 2008-12-31 12 +129 val_129 2008-04-08 11 +129 val_129 2008-04-08 11 +129 val_129 2008-04-08 12 +129 val_129 2008-04-08 12 +129 val_129 2008-12-31 11 +129 val_129 2008-12-31 11 +129 val_129 2008-12-31 12 +129 val_129 2008-12-31 12 +131 val_131 2008-04-08 11 +131 val_131 2008-04-08 12 +131 val_131 2008-12-31 11 +131 val_131 2008-12-31 12 +133 val_133 2008-04-08 11 +133 val_133 2008-04-08 12 +133 val_133 2008-12-31 11 +133 val_133 2008-12-31 12 +134 val_134 2008-04-08 11 +134 val_134 2008-04-08 11 +134 val_134 2008-04-08 12 +134 val_134 2008-04-08 12 +134 val_134 2008-12-31 11 +134 val_134 2008-12-31 11 +134 val_134 2008-12-31 12 +134 val_134 2008-12-31 12 +136 val_136 2008-04-08 11 +136 val_136 2008-04-08 12 +136 val_136 2008-12-31 11 +136 val_136 2008-12-31 12 +137 val_137 2008-04-08 11 +137 val_137 2008-04-08 11 +137 val_137 2008-04-08 12 +137 val_137 2008-04-08 12 +137 val_137 2008-12-31 11 +137 val_137 2008-12-31 11 +137 val_137 2008-12-31 12 +137 val_137 2008-12-31 12 +138 val_138 2008-04-08 11 +138 val_138 2008-04-08 11 +138 val_138 2008-04-08 11 +138 val_138 2008-04-08 11 +138 val_138 2008-04-08 12 +138 val_138 2008-04-08 12 +138 val_138 2008-04-08 12 +138 val_138 2008-04-08 12 +138 val_138 2008-12-31 11 +138 val_138 2008-12-31 11 +138 val_138 2008-12-31 11 +138 val_138 2008-12-31 11 +138 val_138 2008-12-31 12 +138 val_138 2008-12-31 12 +138 val_138 2008-12-31 12 +138 val_138 2008-12-31 12 +143 val_143 2008-04-08 11 +143 val_143 2008-04-08 12 +143 val_143 2008-12-31 11 +143 val_143 2008-12-31 12 +145 val_145 2008-04-08 11 +145 val_145 2008-04-08 12 +145 val_145 2008-12-31 11 +145 val_145 2008-12-31 12 +146 val_146 2008-04-08 11 +146 val_146 2008-04-08 11 +146 val_146 2008-04-08 12 +146 val_146 2008-04-08 12 +146 val_146 2008-12-31 11 +146 val_146 2008-12-31 11 +146 val_146 2008-12-31 12 +146 val_146 2008-12-31 12 +149 val_149 
2008-04-08 11 +149 val_149 2008-04-08 11 +149 val_149 2008-04-08 12 +149 val_149 2008-04-08 12 +149 val_149 2008-12-31 11 +149 val_149 2008-12-31 11 +149 val_149 2008-12-31 12 +149 val_149 2008-12-31 12 +15 val_15 2008-04-08 11 +15 val_15 2008-04-08 11 +15 val_15 2008-04-08 12 +15 val_15 2008-04-08 12 +15 val_15 2008-12-31 11 +15 val_15 2008-12-31 11 +15 val_15 2008-12-31 12 +15 val_15 2008-12-31 12 +150 val_150 2008-04-08 11 +150 val_150 2008-04-08 12 +150 val_150 2008-12-31 11 +150 val_150 2008-12-31 12 +152 val_152 2008-04-08 11 +152 val_152 2008-04-08 11 +152 val_152 2008-04-08 12 +152 val_152 2008-04-08 12 +152 val_152 2008-12-31 11 +152 val_152 2008-12-31 11 +152 val_152 2008-12-31 12 +152 val_152 2008-12-31 12 +153 val_153 2008-04-08 11 +153 val_153 2008-04-08 12 +153 val_153 2008-12-31 11 +153 val_153 2008-12-31 12 +155 val_155 2008-04-08 11 +155 val_155 2008-04-08 12 +155 val_155 2008-12-31 11 +155 val_155 2008-12-31 12 +156 val_156 2008-04-08 11 +156 val_156 2008-04-08 12 +156 val_156 2008-12-31 11 +156 val_156 2008-12-31 12 +157 val_157 2008-04-08 11 +157 val_157 2008-04-08 12 +157 val_157 2008-12-31 11 +157 val_157 2008-12-31 12 +158 val_158 2008-04-08 11 +158 val_158 2008-04-08 12 +158 val_158 2008-12-31 11 +158 val_158 2008-12-31 12 +160 val_160 2008-04-08 11 +160 val_160 2008-04-08 12 +160 val_160 2008-12-31 11 +160 val_160 2008-12-31 12 +162 val_162 2008-04-08 11 +162 val_162 2008-04-08 12 +162 val_162 2008-12-31 11 +162 val_162 2008-12-31 12 +163 val_163 2008-04-08 11 +163 val_163 2008-04-08 12 +163 val_163 2008-12-31 11 +163 val_163 2008-12-31 12 +164 val_164 2008-04-08 11 +164 val_164 2008-04-08 11 +164 val_164 2008-04-08 12 +164 val_164 2008-04-08 12 +164 val_164 2008-12-31 11 +164 val_164 2008-12-31 11 +164 val_164 2008-12-31 12 +164 val_164 2008-12-31 12 +165 val_165 2008-04-08 11 +165 val_165 2008-04-08 11 +165 val_165 2008-04-08 12 +165 val_165 2008-04-08 12 +165 val_165 2008-12-31 11 +165 val_165 2008-12-31 11 +165 val_165 2008-12-31 12 +165 val_165 2008-12-31 12 +166 val_166 2008-04-08 11 +166 val_166 2008-04-08 12 +166 val_166 2008-12-31 11 +166 val_166 2008-12-31 12 +167 val_167 2008-04-08 11 +167 val_167 2008-04-08 11 +167 val_167 2008-04-08 11 +167 val_167 2008-04-08 12 +167 val_167 2008-04-08 12 +167 val_167 2008-04-08 12 +167 val_167 2008-12-31 11 +167 val_167 2008-12-31 11 +167 val_167 2008-12-31 11 +167 val_167 2008-12-31 12 +167 val_167 2008-12-31 12 +167 val_167 2008-12-31 12 +168 val_168 2008-04-08 11 +168 val_168 2008-04-08 12 +168 val_168 2008-12-31 11 +168 val_168 2008-12-31 12 +169 val_169 2008-04-08 11 +169 val_169 2008-04-08 11 +169 val_169 2008-04-08 11 +169 val_169 2008-04-08 11 +169 val_169 2008-04-08 12 +169 val_169 2008-04-08 12 +169 val_169 2008-04-08 12 +169 val_169 2008-04-08 12 +169 val_169 2008-12-31 11 +169 val_169 2008-12-31 11 +169 val_169 2008-12-31 11 +169 val_169 2008-12-31 11 +169 val_169 2008-12-31 12 +169 val_169 2008-12-31 12 +169 val_169 2008-12-31 12 +169 val_169 2008-12-31 12 +17 val_17 2008-04-08 11 +17 val_17 2008-04-08 12 +17 val_17 2008-12-31 11 +17 val_17 2008-12-31 12 +170 val_170 2008-04-08 11 +170 val_170 2008-04-08 12 +170 val_170 2008-12-31 11 +170 val_170 2008-12-31 12 +172 val_172 2008-04-08 11 +172 val_172 2008-04-08 11 +172 val_172 2008-04-08 12 +172 val_172 2008-04-08 12 +172 val_172 2008-12-31 11 +172 val_172 2008-12-31 11 +172 val_172 2008-12-31 12 +172 val_172 2008-12-31 12 +174 val_174 2008-04-08 11 +174 val_174 2008-04-08 11 +174 val_174 2008-04-08 12 +174 val_174 2008-04-08 12 +174 val_174 2008-12-31 11 
+174 val_174 2008-12-31 11 +174 val_174 2008-12-31 12 +174 val_174 2008-12-31 12 +175 val_175 2008-04-08 11 +175 val_175 2008-04-08 11 +175 val_175 2008-04-08 12 +175 val_175 2008-04-08 12 +175 val_175 2008-12-31 11 +175 val_175 2008-12-31 11 +175 val_175 2008-12-31 12 +175 val_175 2008-12-31 12 +176 val_176 2008-04-08 11 +176 val_176 2008-04-08 11 +176 val_176 2008-04-08 12 +176 val_176 2008-04-08 12 +176 val_176 2008-12-31 11 +176 val_176 2008-12-31 11 +176 val_176 2008-12-31 12 +176 val_176 2008-12-31 12 +177 val_177 2008-04-08 11 +177 val_177 2008-04-08 12 +177 val_177 2008-12-31 11 +177 val_177 2008-12-31 12 +178 val_178 2008-04-08 11 +178 val_178 2008-04-08 12 +178 val_178 2008-12-31 11 +178 val_178 2008-12-31 12 +179 val_179 2008-04-08 11 +179 val_179 2008-04-08 11 +179 val_179 2008-04-08 12 +179 val_179 2008-04-08 12 +179 val_179 2008-12-31 11 +179 val_179 2008-12-31 11 +179 val_179 2008-12-31 12 +179 val_179 2008-12-31 12 +18 val_18 2008-04-08 11 +18 val_18 2008-04-08 11 +18 val_18 2008-04-08 12 +18 val_18 2008-04-08 12 +18 val_18 2008-12-31 11 +18 val_18 2008-12-31 11 +18 val_18 2008-12-31 12 +18 val_18 2008-12-31 12 +180 val_180 2008-04-08 11 +180 val_180 2008-04-08 12 +180 val_180 2008-12-31 11 +180 val_180 2008-12-31 12 +181 val_181 2008-04-08 11 +181 val_181 2008-04-08 12 +181 val_181 2008-12-31 11 +181 val_181 2008-12-31 12 +183 val_183 2008-04-08 11 +183 val_183 2008-04-08 12 +183 val_183 2008-12-31 11 +183 val_183 2008-12-31 12 +186 val_186 2008-04-08 11 +186 val_186 2008-04-08 12 +186 val_186 2008-12-31 11 +186 val_186 2008-12-31 12 +187 val_187 2008-04-08 11 +187 val_187 2008-04-08 11 +187 val_187 2008-04-08 11 +187 val_187 2008-04-08 12 +187 val_187 2008-04-08 12 +187 val_187 2008-04-08 12 +187 val_187 2008-12-31 11 +187 val_187 2008-12-31 11 +187 val_187 2008-12-31 11 +187 val_187 2008-12-31 12 +187 val_187 2008-12-31 12 +187 val_187 2008-12-31 12 +189 val_189 2008-04-08 11 +189 val_189 2008-04-08 12 +189 val_189 2008-12-31 11 +189 val_189 2008-12-31 12 +19 val_19 2008-04-08 11 +19 val_19 2008-04-08 12 +19 val_19 2008-12-31 11 +19 val_19 2008-12-31 12 +190 val_190 2008-04-08 11 +190 val_190 2008-04-08 12 +190 val_190 2008-12-31 11 +190 val_190 2008-12-31 12 +191 val_191 2008-04-08 11 +191 val_191 2008-04-08 11 +191 val_191 2008-04-08 12 +191 val_191 2008-04-08 12 +191 val_191 2008-12-31 11 +191 val_191 2008-12-31 11 +191 val_191 2008-12-31 12 +191 val_191 2008-12-31 12 +192 val_192 2008-04-08 11 +192 val_192 2008-04-08 12 +192 val_192 2008-12-31 11 +192 val_192 2008-12-31 12 +193 val_193 2008-04-08 11 +193 val_193 2008-04-08 11 +193 val_193 2008-04-08 11 +193 val_193 2008-04-08 12 +193 val_193 2008-04-08 12 +193 val_193 2008-04-08 12 +193 val_193 2008-12-31 11 +193 val_193 2008-12-31 11 +193 val_193 2008-12-31 11 +193 val_193 2008-12-31 12 +193 val_193 2008-12-31 12 +193 val_193 2008-12-31 12 +194 val_194 2008-04-08 11 +194 val_194 2008-04-08 12 +194 val_194 2008-12-31 11 +194 val_194 2008-12-31 12 +195 val_195 2008-04-08 11 +195 val_195 2008-04-08 11 +195 val_195 2008-04-08 12 +195 val_195 2008-04-08 12 +195 val_195 2008-12-31 11 +195 val_195 2008-12-31 11 +195 val_195 2008-12-31 12 +195 val_195 2008-12-31 12 +196 val_196 2008-04-08 11 +196 val_196 2008-04-08 12 +196 val_196 2008-12-31 11 +196 val_196 2008-12-31 12 +197 val_197 2008-04-08 11 +197 val_197 2008-04-08 11 +197 val_197 2008-04-08 12 +197 val_197 2008-04-08 12 +197 val_197 2008-12-31 11 +197 val_197 2008-12-31 11 +197 val_197 2008-12-31 12 +197 val_197 2008-12-31 12 +199 val_199 2008-04-08 11 +199 val_199 
2008-04-08 11 +199 val_199 2008-04-08 11 +199 val_199 2008-04-08 12 +199 val_199 2008-04-08 12 +199 val_199 2008-04-08 12 +199 val_199 2008-12-31 11 +199 val_199 2008-12-31 11 +199 val_199 2008-12-31 11 +199 val_199 2008-12-31 12 +199 val_199 2008-12-31 12 +199 val_199 2008-12-31 12 +2 val_2 2008-04-08 11 +2 val_2 2008-04-08 12 +2 val_2 2008-12-31 11 +2 val_2 2008-12-31 12 +20 val_20 2008-04-08 11 +20 val_20 2008-04-08 12 +20 val_20 2008-12-31 11 +20 val_20 2008-12-31 12 +200 val_200 2008-04-08 11 +200 val_200 2008-04-08 11 +200 val_200 2008-04-08 12 +200 val_200 2008-04-08 12 +200 val_200 2008-12-31 11 +200 val_200 2008-12-31 11 +200 val_200 2008-12-31 12 +200 val_200 2008-12-31 12 +201 val_201 2008-04-08 11 +201 val_201 2008-04-08 12 +201 val_201 2008-12-31 11 +201 val_201 2008-12-31 12 +202 val_202 2008-04-08 11 +202 val_202 2008-04-08 12 +202 val_202 2008-12-31 11 +202 val_202 2008-12-31 12 +203 val_203 2008-04-08 11 +203 val_203 2008-04-08 11 +203 val_203 2008-04-08 12 +203 val_203 2008-04-08 12 +203 val_203 2008-12-31 11 +203 val_203 2008-12-31 11 +203 val_203 2008-12-31 12 +203 val_203 2008-12-31 12 +205 val_205 2008-04-08 11 +205 val_205 2008-04-08 11 +205 val_205 2008-04-08 12 +205 val_205 2008-04-08 12 +205 val_205 2008-12-31 11 +205 val_205 2008-12-31 11 +205 val_205 2008-12-31 12 +205 val_205 2008-12-31 12 +207 val_207 2008-04-08 11 +207 val_207 2008-04-08 11 +207 val_207 2008-04-08 12 +207 val_207 2008-04-08 12 +207 val_207 2008-12-31 11 +207 val_207 2008-12-31 11 +207 val_207 2008-12-31 12 +207 val_207 2008-12-31 12 +208 val_208 2008-04-08 11 +208 val_208 2008-04-08 11 +208 val_208 2008-04-08 11 +208 val_208 2008-04-08 12 +208 val_208 2008-04-08 12 +208 val_208 2008-04-08 12 +208 val_208 2008-12-31 11 +208 val_208 2008-12-31 11 +208 val_208 2008-12-31 11 +208 val_208 2008-12-31 12 +208 val_208 2008-12-31 12 +208 val_208 2008-12-31 12 +209 val_209 2008-04-08 11 +209 val_209 2008-04-08 11 +209 val_209 2008-04-08 12 +209 val_209 2008-04-08 12 +209 val_209 2008-12-31 11 +209 val_209 2008-12-31 11 +209 val_209 2008-12-31 12 +209 val_209 2008-12-31 12 +213 val_213 2008-04-08 11 +213 val_213 2008-04-08 11 +213 val_213 2008-04-08 12 +213 val_213 2008-04-08 12 +213 val_213 2008-12-31 11 +213 val_213 2008-12-31 11 +213 val_213 2008-12-31 12 +213 val_213 2008-12-31 12 +214 val_214 2008-04-08 11 +214 val_214 2008-04-08 12 +214 val_214 2008-12-31 11 +214 val_214 2008-12-31 12 +216 val_216 2008-04-08 11 +216 val_216 2008-04-08 11 +216 val_216 2008-04-08 12 +216 val_216 2008-04-08 12 +216 val_216 2008-12-31 11 +216 val_216 2008-12-31 11 +216 val_216 2008-12-31 12 +216 val_216 2008-12-31 12 +217 val_217 2008-04-08 11 +217 val_217 2008-04-08 11 +217 val_217 2008-04-08 12 +217 val_217 2008-04-08 12 +217 val_217 2008-12-31 11 +217 val_217 2008-12-31 11 +217 val_217 2008-12-31 12 +217 val_217 2008-12-31 12 +218 val_218 2008-04-08 11 +218 val_218 2008-04-08 12 +218 val_218 2008-12-31 11 +218 val_218 2008-12-31 12 +219 val_219 2008-04-08 11 +219 val_219 2008-04-08 11 +219 val_219 2008-04-08 12 +219 val_219 2008-04-08 12 +219 val_219 2008-12-31 11 +219 val_219 2008-12-31 11 +219 val_219 2008-12-31 12 +219 val_219 2008-12-31 12 +221 val_221 2008-04-08 11 +221 val_221 2008-04-08 11 +221 val_221 2008-04-08 12 +221 val_221 2008-04-08 12 +221 val_221 2008-12-31 11 +221 val_221 2008-12-31 11 +221 val_221 2008-12-31 12 +221 val_221 2008-12-31 12 +222 val_222 2008-04-08 11 +222 val_222 2008-04-08 12 +222 val_222 2008-12-31 11 +222 val_222 2008-12-31 12 +223 val_223 2008-04-08 11 +223 val_223 2008-04-08 11 
+223 val_223 2008-04-08 12 +223 val_223 2008-04-08 12 +223 val_223 2008-12-31 11 +223 val_223 2008-12-31 11 +223 val_223 2008-12-31 12 +223 val_223 2008-12-31 12 +224 val_224 2008-04-08 11 +224 val_224 2008-04-08 11 +224 val_224 2008-04-08 12 +224 val_224 2008-04-08 12 +224 val_224 2008-12-31 11 +224 val_224 2008-12-31 11 +224 val_224 2008-12-31 12 +224 val_224 2008-12-31 12 +226 val_226 2008-04-08 11 +226 val_226 2008-04-08 12 +226 val_226 2008-12-31 11 +226 val_226 2008-12-31 12 +228 val_228 2008-04-08 11 +228 val_228 2008-04-08 12 +228 val_228 2008-12-31 11 +228 val_228 2008-12-31 12 +229 val_229 2008-04-08 11 +229 val_229 2008-04-08 11 +229 val_229 2008-04-08 12 +229 val_229 2008-04-08 12 +229 val_229 2008-12-31 11 +229 val_229 2008-12-31 11 +229 val_229 2008-12-31 12 +229 val_229 2008-12-31 12 +230 val_230 2008-04-08 11 +230 val_230 2008-04-08 11 +230 val_230 2008-04-08 11 +230 val_230 2008-04-08 11 +230 val_230 2008-04-08 11 +230 val_230 2008-04-08 12 +230 val_230 2008-04-08 12 +230 val_230 2008-04-08 12 +230 val_230 2008-04-08 12 +230 val_230 2008-04-08 12 +230 val_230 2008-12-31 11 +230 val_230 2008-12-31 11 +230 val_230 2008-12-31 11 +230 val_230 2008-12-31 11 +230 val_230 2008-12-31 11 +230 val_230 2008-12-31 12 +230 val_230 2008-12-31 12 +230 val_230 2008-12-31 12 +230 val_230 2008-12-31 12 +230 val_230 2008-12-31 12 +233 val_233 2008-04-08 11 +233 val_233 2008-04-08 11 +233 val_233 2008-04-08 12 +233 val_233 2008-04-08 12 +233 val_233 2008-12-31 11 +233 val_233 2008-12-31 11 +233 val_233 2008-12-31 12 +233 val_233 2008-12-31 12 +235 val_235 2008-04-08 11 +235 val_235 2008-04-08 12 +235 val_235 2008-12-31 11 +235 val_235 2008-12-31 12 +237 val_237 2008-04-08 11 +237 val_237 2008-04-08 11 +237 val_237 2008-04-08 12 +237 val_237 2008-04-08 12 +237 val_237 2008-12-31 11 +237 val_237 2008-12-31 11 +237 val_237 2008-12-31 12 +237 val_237 2008-12-31 12 +238 val_238 2008-04-08 11 +238 val_238 2008-04-08 11 +238 val_238 2008-04-08 12 +238 val_238 2008-04-08 12 +238 val_238 2008-12-31 11 +238 val_238 2008-12-31 11 +238 val_238 2008-12-31 12 +238 val_238 2008-12-31 12 +239 val_239 2008-04-08 11 +239 val_239 2008-04-08 11 +239 val_239 2008-04-08 12 +239 val_239 2008-04-08 12 +239 val_239 2008-12-31 11 +239 val_239 2008-12-31 11 +239 val_239 2008-12-31 12 +239 val_239 2008-12-31 12 +24 val_24 2008-04-08 11 +24 val_24 2008-04-08 11 +24 val_24 2008-04-08 12 +24 val_24 2008-04-08 12 +24 val_24 2008-12-31 11 +24 val_24 2008-12-31 11 +24 val_24 2008-12-31 12 +24 val_24 2008-12-31 12 +241 val_241 2008-04-08 11 +241 val_241 2008-04-08 12 +241 val_241 2008-12-31 11 +241 val_241 2008-12-31 12 +242 val_242 2008-04-08 11 +242 val_242 2008-04-08 11 +242 val_242 2008-04-08 12 +242 val_242 2008-04-08 12 +242 val_242 2008-12-31 11 +242 val_242 2008-12-31 11 +242 val_242 2008-12-31 12 +242 val_242 2008-12-31 12 +244 val_244 2008-04-08 11 +244 val_244 2008-04-08 12 +244 val_244 2008-12-31 11 +244 val_244 2008-12-31 12 +247 val_247 2008-04-08 11 +247 val_247 2008-04-08 12 +247 val_247 2008-12-31 11 +247 val_247 2008-12-31 12 +248 val_248 2008-04-08 11 +248 val_248 2008-04-08 12 +248 val_248 2008-12-31 11 +248 val_248 2008-12-31 12 +249 val_249 2008-04-08 11 +249 val_249 2008-04-08 12 +249 val_249 2008-12-31 11 +249 val_249 2008-12-31 12 +252 val_252 2008-04-08 11 +252 val_252 2008-04-08 12 +252 val_252 2008-12-31 11 +252 val_252 2008-12-31 12 +255 val_255 2008-04-08 11 +255 val_255 2008-04-08 11 +255 val_255 2008-04-08 12 +255 val_255 2008-04-08 12 +255 val_255 2008-12-31 11 +255 val_255 2008-12-31 11 +255 
val_255 2008-12-31 12 +255 val_255 2008-12-31 12 +256 val_256 2008-04-08 11 +256 val_256 2008-04-08 11 +256 val_256 2008-04-08 12 +256 val_256 2008-04-08 12 +256 val_256 2008-12-31 11 +256 val_256 2008-12-31 11 +256 val_256 2008-12-31 12 +256 val_256 2008-12-31 12 +257 val_257 2008-04-08 11 +257 val_257 2008-04-08 12 +257 val_257 2008-12-31 11 +257 val_257 2008-12-31 12 +258 val_258 2008-04-08 11 +258 val_258 2008-04-08 12 +258 val_258 2008-12-31 11 +258 val_258 2008-12-31 12 +26 val_26 2008-04-08 11 +26 val_26 2008-04-08 11 +26 val_26 2008-04-08 12 +26 val_26 2008-04-08 12 +26 val_26 2008-12-31 11 +26 val_26 2008-12-31 11 +26 val_26 2008-12-31 12 +26 val_26 2008-12-31 12 +260 val_260 2008-04-08 11 +260 val_260 2008-04-08 12 +260 val_260 2008-12-31 11 +260 val_260 2008-12-31 12 +262 val_262 2008-04-08 11 +262 val_262 2008-04-08 12 +262 val_262 2008-12-31 11 +262 val_262 2008-12-31 12 +263 val_263 2008-04-08 11 +263 val_263 2008-04-08 12 +263 val_263 2008-12-31 11 +263 val_263 2008-12-31 12 +265 val_265 2008-04-08 11 +265 val_265 2008-04-08 11 +265 val_265 2008-04-08 12 +265 val_265 2008-04-08 12 +265 val_265 2008-12-31 11 +265 val_265 2008-12-31 11 +265 val_265 2008-12-31 12 +265 val_265 2008-12-31 12 +266 val_266 2008-04-08 11 +266 val_266 2008-04-08 12 +266 val_266 2008-12-31 11 +266 val_266 2008-12-31 12 +27 val_27 2008-04-08 11 +27 val_27 2008-04-08 12 +27 val_27 2008-12-31 11 +27 val_27 2008-12-31 12 +272 val_272 2008-04-08 11 +272 val_272 2008-04-08 11 +272 val_272 2008-04-08 12 +272 val_272 2008-04-08 12 +272 val_272 2008-12-31 11 +272 val_272 2008-12-31 11 +272 val_272 2008-12-31 12 +272 val_272 2008-12-31 12 +273 val_273 2008-04-08 11 +273 val_273 2008-04-08 11 +273 val_273 2008-04-08 11 +273 val_273 2008-04-08 12 +273 val_273 2008-04-08 12 +273 val_273 2008-04-08 12 +273 val_273 2008-12-31 11 +273 val_273 2008-12-31 11 +273 val_273 2008-12-31 11 +273 val_273 2008-12-31 12 +273 val_273 2008-12-31 12 +273 val_273 2008-12-31 12 +274 val_274 2008-04-08 11 +274 val_274 2008-04-08 12 +274 val_274 2008-12-31 11 +274 val_274 2008-12-31 12 +275 val_275 2008-04-08 11 +275 val_275 2008-04-08 12 +275 val_275 2008-12-31 11 +275 val_275 2008-12-31 12 +277 val_277 2008-04-08 11 +277 val_277 2008-04-08 11 +277 val_277 2008-04-08 11 +277 val_277 2008-04-08 11 +277 val_277 2008-04-08 12 +277 val_277 2008-04-08 12 +277 val_277 2008-04-08 12 +277 val_277 2008-04-08 12 +277 val_277 2008-12-31 11 +277 val_277 2008-12-31 11 +277 val_277 2008-12-31 11 +277 val_277 2008-12-31 11 +277 val_277 2008-12-31 12 +277 val_277 2008-12-31 12 +277 val_277 2008-12-31 12 +277 val_277 2008-12-31 12 +278 val_278 2008-04-08 11 +278 val_278 2008-04-08 11 +278 val_278 2008-04-08 12 +278 val_278 2008-04-08 12 +278 val_278 2008-12-31 11 +278 val_278 2008-12-31 11 +278 val_278 2008-12-31 12 +278 val_278 2008-12-31 12 +28 val_28 2008-04-08 11 +28 val_28 2008-04-08 12 +28 val_28 2008-12-31 11 +28 val_28 2008-12-31 12 +280 val_280 2008-04-08 11 +280 val_280 2008-04-08 11 +280 val_280 2008-04-08 12 +280 val_280 2008-04-08 12 +280 val_280 2008-12-31 11 +280 val_280 2008-12-31 11 +280 val_280 2008-12-31 12 +280 val_280 2008-12-31 12 +281 val_281 2008-04-08 11 +281 val_281 2008-04-08 11 +281 val_281 2008-04-08 12 +281 val_281 2008-04-08 12 +281 val_281 2008-12-31 11 +281 val_281 2008-12-31 11 +281 val_281 2008-12-31 12 +281 val_281 2008-12-31 12 +282 val_282 2008-04-08 11 +282 val_282 2008-04-08 11 +282 val_282 2008-04-08 12 +282 val_282 2008-04-08 12 +282 val_282 2008-12-31 11 +282 val_282 2008-12-31 11 +282 val_282 2008-12-31 12 
+282 val_282 2008-12-31 12 +283 val_283 2008-04-08 11 +283 val_283 2008-04-08 12 +283 val_283 2008-12-31 11 +283 val_283 2008-12-31 12 +284 val_284 2008-04-08 11 +284 val_284 2008-04-08 12 +284 val_284 2008-12-31 11 +284 val_284 2008-12-31 12 +285 val_285 2008-04-08 11 +285 val_285 2008-04-08 12 +285 val_285 2008-12-31 11 +285 val_285 2008-12-31 12 +286 val_286 2008-04-08 11 +286 val_286 2008-04-08 12 +286 val_286 2008-12-31 11 +286 val_286 2008-12-31 12 +287 val_287 2008-04-08 11 +287 val_287 2008-04-08 12 +287 val_287 2008-12-31 11 +287 val_287 2008-12-31 12 +288 val_288 2008-04-08 11 +288 val_288 2008-04-08 11 +288 val_288 2008-04-08 12 +288 val_288 2008-04-08 12 +288 val_288 2008-12-31 11 +288 val_288 2008-12-31 11 +288 val_288 2008-12-31 12 +288 val_288 2008-12-31 12 +289 val_289 2008-04-08 11 +289 val_289 2008-04-08 12 +289 val_289 2008-12-31 11 +289 val_289 2008-12-31 12 +291 val_291 2008-04-08 11 +291 val_291 2008-04-08 12 +291 val_291 2008-12-31 11 +291 val_291 2008-12-31 12 +292 val_292 2008-04-08 11 +292 val_292 2008-04-08 12 +292 val_292 2008-12-31 11 +292 val_292 2008-12-31 12 +296 val_296 2008-04-08 11 +296 val_296 2008-04-08 12 +296 val_296 2008-12-31 11 +296 val_296 2008-12-31 12 +298 val_298 2008-04-08 11 +298 val_298 2008-04-08 11 +298 val_298 2008-04-08 11 +298 val_298 2008-04-08 12 +298 val_298 2008-04-08 12 +298 val_298 2008-04-08 12 +298 val_298 2008-12-31 11 +298 val_298 2008-12-31 11 +298 val_298 2008-12-31 11 +298 val_298 2008-12-31 12 +298 val_298 2008-12-31 12 +298 val_298 2008-12-31 12 +30 val_30 2008-04-08 11 +30 val_30 2008-04-08 12 +30 val_30 2008-12-31 11 +30 val_30 2008-12-31 12 +302 val_302 2008-04-08 11 +302 val_302 2008-04-08 12 +302 val_302 2008-12-31 11 +302 val_302 2008-12-31 12 +305 val_305 2008-04-08 11 +305 val_305 2008-04-08 12 +305 val_305 2008-12-31 11 +305 val_305 2008-12-31 12 +306 val_306 2008-04-08 11 +306 val_306 2008-04-08 12 +306 val_306 2008-12-31 11 +306 val_306 2008-12-31 12 +307 val_307 2008-04-08 11 +307 val_307 2008-04-08 11 +307 val_307 2008-04-08 12 +307 val_307 2008-04-08 12 +307 val_307 2008-12-31 11 +307 val_307 2008-12-31 11 +307 val_307 2008-12-31 12 +307 val_307 2008-12-31 12 +308 val_308 2008-04-08 11 +308 val_308 2008-04-08 12 +308 val_308 2008-12-31 11 +308 val_308 2008-12-31 12 +309 val_309 2008-04-08 11 +309 val_309 2008-04-08 11 +309 val_309 2008-04-08 12 +309 val_309 2008-04-08 12 +309 val_309 2008-12-31 11 +309 val_309 2008-12-31 11 +309 val_309 2008-12-31 12 +309 val_309 2008-12-31 12 +310 val_310 2008-04-08 11 +310 val_310 2008-04-08 12 +310 val_310 2008-12-31 11 +310 val_310 2008-12-31 12 +311 val_311 2008-04-08 11 +311 val_311 2008-04-08 11 +311 val_311 2008-04-08 11 +311 val_311 2008-04-08 12 +311 val_311 2008-04-08 12 +311 val_311 2008-04-08 12 +311 val_311 2008-12-31 11 +311 val_311 2008-12-31 11 +311 val_311 2008-12-31 11 +311 val_311 2008-12-31 12 +311 val_311 2008-12-31 12 +311 val_311 2008-12-31 12 +315 val_315 2008-04-08 11 +315 val_315 2008-04-08 12 +315 val_315 2008-12-31 11 +315 val_315 2008-12-31 12 +316 val_316 2008-04-08 11 +316 val_316 2008-04-08 11 +316 val_316 2008-04-08 11 +316 val_316 2008-04-08 12 +316 val_316 2008-04-08 12 +316 val_316 2008-04-08 12 +316 val_316 2008-12-31 11 +316 val_316 2008-12-31 11 +316 val_316 2008-12-31 11 +316 val_316 2008-12-31 12 +316 val_316 2008-12-31 12 +316 val_316 2008-12-31 12 +317 val_317 2008-04-08 11 +317 val_317 2008-04-08 11 +317 val_317 2008-04-08 12 +317 val_317 2008-04-08 12 +317 val_317 2008-12-31 11 +317 val_317 2008-12-31 11 +317 val_317 2008-12-31 
12 +317 val_317 2008-12-31 12 +318 val_318 2008-04-08 11 +318 val_318 2008-04-08 11 +318 val_318 2008-04-08 11 +318 val_318 2008-04-08 12 +318 val_318 2008-04-08 12 +318 val_318 2008-04-08 12 +318 val_318 2008-12-31 11 +318 val_318 2008-12-31 11 +318 val_318 2008-12-31 11 +318 val_318 2008-12-31 12 +318 val_318 2008-12-31 12 +318 val_318 2008-12-31 12 +321 val_321 2008-04-08 11 +321 val_321 2008-04-08 11 +321 val_321 2008-04-08 12 +321 val_321 2008-04-08 12 +321 val_321 2008-12-31 11 +321 val_321 2008-12-31 11 +321 val_321 2008-12-31 12 +321 val_321 2008-12-31 12 +322 val_322 2008-04-08 11 +322 val_322 2008-04-08 11 +322 val_322 2008-04-08 12 +322 val_322 2008-04-08 12 +322 val_322 2008-12-31 11 +322 val_322 2008-12-31 11 +322 val_322 2008-12-31 12 +322 val_322 2008-12-31 12 +323 val_323 2008-04-08 11 +323 val_323 2008-04-08 12 +323 val_323 2008-12-31 11 +323 val_323 2008-12-31 12 +325 val_325 2008-04-08 11 +325 val_325 2008-04-08 11 +325 val_325 2008-04-08 12 +325 val_325 2008-04-08 12 +325 val_325 2008-12-31 11 +325 val_325 2008-12-31 11 +325 val_325 2008-12-31 12 +325 val_325 2008-12-31 12 +327 val_327 2008-04-08 11 +327 val_327 2008-04-08 11 +327 val_327 2008-04-08 11 +327 val_327 2008-04-08 12 +327 val_327 2008-04-08 12 +327 val_327 2008-04-08 12 +327 val_327 2008-12-31 11 +327 val_327 2008-12-31 11 +327 val_327 2008-12-31 11 +327 val_327 2008-12-31 12 +327 val_327 2008-12-31 12 +327 val_327 2008-12-31 12 +33 val_33 2008-04-08 11 +33 val_33 2008-04-08 12 +33 val_33 2008-12-31 11 +33 val_33 2008-12-31 12 +331 val_331 2008-04-08 11 +331 val_331 2008-04-08 11 +331 val_331 2008-04-08 12 +331 val_331 2008-04-08 12 +331 val_331 2008-12-31 11 +331 val_331 2008-12-31 11 +331 val_331 2008-12-31 12 +331 val_331 2008-12-31 12 +332 val_332 2008-04-08 11 +332 val_332 2008-04-08 12 +332 val_332 2008-12-31 11 +332 val_332 2008-12-31 12 +333 val_333 2008-04-08 11 +333 val_333 2008-04-08 11 +333 val_333 2008-04-08 12 +333 val_333 2008-04-08 12 +333 val_333 2008-12-31 11 +333 val_333 2008-12-31 11 +333 val_333 2008-12-31 12 +333 val_333 2008-12-31 12 +335 val_335 2008-04-08 11 +335 val_335 2008-04-08 12 +335 val_335 2008-12-31 11 +335 val_335 2008-12-31 12 +336 val_336 2008-04-08 11 +336 val_336 2008-04-08 12 +336 val_336 2008-12-31 11 +336 val_336 2008-12-31 12 +338 val_338 2008-04-08 11 +338 val_338 2008-04-08 12 +338 val_338 2008-12-31 11 +338 val_338 2008-12-31 12 +339 val_339 2008-04-08 11 +339 val_339 2008-04-08 12 +339 val_339 2008-12-31 11 +339 val_339 2008-12-31 12 +34 val_34 2008-04-08 11 +34 val_34 2008-04-08 12 +34 val_34 2008-12-31 11 +34 val_34 2008-12-31 12 +341 val_341 2008-04-08 11 +341 val_341 2008-04-08 12 +341 val_341 2008-12-31 11 +341 val_341 2008-12-31 12 +342 val_342 2008-04-08 11 +342 val_342 2008-04-08 11 +342 val_342 2008-04-08 12 +342 val_342 2008-04-08 12 +342 val_342 2008-12-31 11 +342 val_342 2008-12-31 11 +342 val_342 2008-12-31 12 +342 val_342 2008-12-31 12 +344 val_344 2008-04-08 11 +344 val_344 2008-04-08 11 +344 val_344 2008-04-08 12 +344 val_344 2008-04-08 12 +344 val_344 2008-12-31 11 +344 val_344 2008-12-31 11 +344 val_344 2008-12-31 12 +344 val_344 2008-12-31 12 +345 val_345 2008-04-08 11 +345 val_345 2008-04-08 12 +345 val_345 2008-12-31 11 +345 val_345 2008-12-31 12 +348 val_348 2008-04-08 11 +348 val_348 2008-04-08 11 +348 val_348 2008-04-08 11 +348 val_348 2008-04-08 11 +348 val_348 2008-04-08 11 +348 val_348 2008-04-08 12 +348 val_348 2008-04-08 12 +348 val_348 2008-04-08 12 +348 val_348 2008-04-08 12 +348 val_348 2008-04-08 12 +348 val_348 2008-12-31 11 
+348 val_348 2008-12-31 11 +348 val_348 2008-12-31 11 +348 val_348 2008-12-31 11 +348 val_348 2008-12-31 11 +348 val_348 2008-12-31 12 +348 val_348 2008-12-31 12 +348 val_348 2008-12-31 12 +348 val_348 2008-12-31 12 +348 val_348 2008-12-31 12 +35 val_35 2008-04-08 11 +35 val_35 2008-04-08 11 +35 val_35 2008-04-08 11 +35 val_35 2008-04-08 12 +35 val_35 2008-04-08 12 +35 val_35 2008-04-08 12 +35 val_35 2008-12-31 11 +35 val_35 2008-12-31 11 +35 val_35 2008-12-31 11 +35 val_35 2008-12-31 12 +35 val_35 2008-12-31 12 +35 val_35 2008-12-31 12 +351 val_351 2008-04-08 11 +351 val_351 2008-04-08 12 +351 val_351 2008-12-31 11 +351 val_351 2008-12-31 12 +353 val_353 2008-04-08 11 +353 val_353 2008-04-08 11 +353 val_353 2008-04-08 12 +353 val_353 2008-04-08 12 +353 val_353 2008-12-31 11 +353 val_353 2008-12-31 11 +353 val_353 2008-12-31 12 +353 val_353 2008-12-31 12 +356 val_356 2008-04-08 11 +356 val_356 2008-04-08 12 +356 val_356 2008-12-31 11 +356 val_356 2008-12-31 12 +360 val_360 2008-04-08 11 +360 val_360 2008-04-08 12 +360 val_360 2008-12-31 11 +360 val_360 2008-12-31 12 +362 val_362 2008-04-08 11 +362 val_362 2008-04-08 12 +362 val_362 2008-12-31 11 +362 val_362 2008-12-31 12 +364 val_364 2008-04-08 11 +364 val_364 2008-04-08 12 +364 val_364 2008-12-31 11 +364 val_364 2008-12-31 12 +365 val_365 2008-04-08 11 +365 val_365 2008-04-08 12 +365 val_365 2008-12-31 11 +365 val_365 2008-12-31 12 +366 val_366 2008-04-08 11 +366 val_366 2008-04-08 12 +366 val_366 2008-12-31 11 +366 val_366 2008-12-31 12 +367 val_367 2008-04-08 11 +367 val_367 2008-04-08 11 +367 val_367 2008-04-08 12 +367 val_367 2008-04-08 12 +367 val_367 2008-12-31 11 +367 val_367 2008-12-31 11 +367 val_367 2008-12-31 12 +367 val_367 2008-12-31 12 +368 val_368 2008-04-08 11 +368 val_368 2008-04-08 12 +368 val_368 2008-12-31 11 +368 val_368 2008-12-31 12 +369 val_369 2008-04-08 11 +369 val_369 2008-04-08 11 +369 val_369 2008-04-08 11 +369 val_369 2008-04-08 12 +369 val_369 2008-04-08 12 +369 val_369 2008-04-08 12 +369 val_369 2008-12-31 11 +369 val_369 2008-12-31 11 +369 val_369 2008-12-31 11 +369 val_369 2008-12-31 12 +369 val_369 2008-12-31 12 +369 val_369 2008-12-31 12 +37 val_37 2008-04-08 11 +37 val_37 2008-04-08 11 +37 val_37 2008-04-08 12 +37 val_37 2008-04-08 12 +37 val_37 2008-12-31 11 +37 val_37 2008-12-31 11 +37 val_37 2008-12-31 12 +37 val_37 2008-12-31 12 +373 val_373 2008-04-08 11 +373 val_373 2008-04-08 12 +373 val_373 2008-12-31 11 +373 val_373 2008-12-31 12 +374 val_374 2008-04-08 11 +374 val_374 2008-04-08 12 +374 val_374 2008-12-31 11 +374 val_374 2008-12-31 12 +375 val_375 2008-04-08 11 +375 val_375 2008-04-08 12 +375 val_375 2008-12-31 11 +375 val_375 2008-12-31 12 +377 val_377 2008-04-08 11 +377 val_377 2008-04-08 12 +377 val_377 2008-12-31 11 +377 val_377 2008-12-31 12 +378 val_378 2008-04-08 11 +378 val_378 2008-04-08 12 +378 val_378 2008-12-31 11 +378 val_378 2008-12-31 12 +379 val_379 2008-04-08 11 +379 val_379 2008-04-08 12 +379 val_379 2008-12-31 11 +379 val_379 2008-12-31 12 +382 val_382 2008-04-08 11 +382 val_382 2008-04-08 11 +382 val_382 2008-04-08 12 +382 val_382 2008-04-08 12 +382 val_382 2008-12-31 11 +382 val_382 2008-12-31 11 +382 val_382 2008-12-31 12 +382 val_382 2008-12-31 12 +384 val_384 2008-04-08 11 +384 val_384 2008-04-08 11 +384 val_384 2008-04-08 11 +384 val_384 2008-04-08 12 +384 val_384 2008-04-08 12 +384 val_384 2008-04-08 12 +384 val_384 2008-12-31 11 +384 val_384 2008-12-31 11 +384 val_384 2008-12-31 11 +384 val_384 2008-12-31 12 +384 val_384 2008-12-31 12 +384 val_384 2008-12-31 12 
+386 val_386 2008-04-08 11 +386 val_386 2008-04-08 12 +386 val_386 2008-12-31 11 +386 val_386 2008-12-31 12 +389 val_389 2008-04-08 11 +389 val_389 2008-04-08 12 +389 val_389 2008-12-31 11 +389 val_389 2008-12-31 12 +392 val_392 2008-04-08 11 +392 val_392 2008-04-08 12 +392 val_392 2008-12-31 11 +392 val_392 2008-12-31 12 +393 val_393 2008-04-08 11 +393 val_393 2008-04-08 12 +393 val_393 2008-12-31 11 +393 val_393 2008-12-31 12 +394 val_394 2008-04-08 11 +394 val_394 2008-04-08 12 +394 val_394 2008-12-31 11 +394 val_394 2008-12-31 12 +395 val_395 2008-04-08 11 +395 val_395 2008-04-08 11 +395 val_395 2008-04-08 12 +395 val_395 2008-04-08 12 +395 val_395 2008-12-31 11 +395 val_395 2008-12-31 11 +395 val_395 2008-12-31 12 +395 val_395 2008-12-31 12 +396 val_396 2008-04-08 11 +396 val_396 2008-04-08 11 +396 val_396 2008-04-08 11 +396 val_396 2008-04-08 12 +396 val_396 2008-04-08 12 +396 val_396 2008-04-08 12 +396 val_396 2008-12-31 11 +396 val_396 2008-12-31 11 +396 val_396 2008-12-31 11 +396 val_396 2008-12-31 12 +396 val_396 2008-12-31 12 +396 val_396 2008-12-31 12 +397 val_397 2008-04-08 11 +397 val_397 2008-04-08 11 +397 val_397 2008-04-08 12 +397 val_397 2008-04-08 12 +397 val_397 2008-12-31 11 +397 val_397 2008-12-31 11 +397 val_397 2008-12-31 12 +397 val_397 2008-12-31 12 +399 val_399 2008-04-08 11 +399 val_399 2008-04-08 11 +399 val_399 2008-04-08 12 +399 val_399 2008-04-08 12 +399 val_399 2008-12-31 11 +399 val_399 2008-12-31 11 +399 val_399 2008-12-31 12 +399 val_399 2008-12-31 12 +4 val_4 2008-04-08 11 +4 val_4 2008-04-08 12 +4 val_4 2008-12-31 11 +4 val_4 2008-12-31 12 +400 val_400 2008-04-08 11 +400 val_400 2008-04-08 12 +400 val_400 2008-12-31 11 +400 val_400 2008-12-31 12 +401 val_401 2008-04-08 11 +401 val_401 2008-04-08 11 +401 val_401 2008-04-08 11 +401 val_401 2008-04-08 11 +401 val_401 2008-04-08 11 +401 val_401 2008-04-08 12 +401 val_401 2008-04-08 12 +401 val_401 2008-04-08 12 +401 val_401 2008-04-08 12 +401 val_401 2008-04-08 12 +401 val_401 2008-12-31 11 +401 val_401 2008-12-31 11 +401 val_401 2008-12-31 11 +401 val_401 2008-12-31 11 +401 val_401 2008-12-31 11 +401 val_401 2008-12-31 12 +401 val_401 2008-12-31 12 +401 val_401 2008-12-31 12 +401 val_401 2008-12-31 12 +401 val_401 2008-12-31 12 +402 val_402 2008-04-08 11 +402 val_402 2008-04-08 12 +402 val_402 2008-12-31 11 +402 val_402 2008-12-31 12 +403 val_403 2008-04-08 11 +403 val_403 2008-04-08 11 +403 val_403 2008-04-08 11 +403 val_403 2008-04-08 12 +403 val_403 2008-04-08 12 +403 val_403 2008-04-08 12 +403 val_403 2008-12-31 11 +403 val_403 2008-12-31 11 +403 val_403 2008-12-31 11 +403 val_403 2008-12-31 12 +403 val_403 2008-12-31 12 +403 val_403 2008-12-31 12 +404 val_404 2008-04-08 11 +404 val_404 2008-04-08 11 +404 val_404 2008-04-08 12 +404 val_404 2008-04-08 12 +404 val_404 2008-12-31 11 +404 val_404 2008-12-31 11 +404 val_404 2008-12-31 12 +404 val_404 2008-12-31 12 +406 val_406 2008-04-08 11 +406 val_406 2008-04-08 11 +406 val_406 2008-04-08 11 +406 val_406 2008-04-08 11 +406 val_406 2008-04-08 12 +406 val_406 2008-04-08 12 +406 val_406 2008-04-08 12 +406 val_406 2008-04-08 12 +406 val_406 2008-12-31 11 +406 val_406 2008-12-31 11 +406 val_406 2008-12-31 11 +406 val_406 2008-12-31 11 +406 val_406 2008-12-31 12 +406 val_406 2008-12-31 12 +406 val_406 2008-12-31 12 +406 val_406 2008-12-31 12 +407 val_407 2008-04-08 11 +407 val_407 2008-04-08 12 +407 val_407 2008-12-31 11 +407 val_407 2008-12-31 12 +409 val_409 2008-04-08 11 +409 val_409 2008-04-08 11 +409 val_409 2008-04-08 11 +409 val_409 2008-04-08 12 +409 
val_409 2008-04-08 12 +409 val_409 2008-04-08 12 +409 val_409 2008-12-31 11 +409 val_409 2008-12-31 11 +409 val_409 2008-12-31 11 +409 val_409 2008-12-31 12 +409 val_409 2008-12-31 12 +409 val_409 2008-12-31 12 +41 val_41 2008-04-08 11 +41 val_41 2008-04-08 12 +41 val_41 2008-12-31 11 +41 val_41 2008-12-31 12 +411 val_411 2008-04-08 11 +411 val_411 2008-04-08 12 +411 val_411 2008-12-31 11 +411 val_411 2008-12-31 12 +413 val_413 2008-04-08 11 +413 val_413 2008-04-08 11 +413 val_413 2008-04-08 12 +413 val_413 2008-04-08 12 +413 val_413 2008-12-31 11 +413 val_413 2008-12-31 11 +413 val_413 2008-12-31 12 +413 val_413 2008-12-31 12 +414 val_414 2008-04-08 11 +414 val_414 2008-04-08 11 +414 val_414 2008-04-08 12 +414 val_414 2008-04-08 12 +414 val_414 2008-12-31 11 +414 val_414 2008-12-31 11 +414 val_414 2008-12-31 12 +414 val_414 2008-12-31 12 +417 val_417 2008-04-08 11 +417 val_417 2008-04-08 11 +417 val_417 2008-04-08 11 +417 val_417 2008-04-08 12 +417 val_417 2008-04-08 12 +417 val_417 2008-04-08 12 +417 val_417 2008-12-31 11 +417 val_417 2008-12-31 11 +417 val_417 2008-12-31 11 +417 val_417 2008-12-31 12 +417 val_417 2008-12-31 12 +417 val_417 2008-12-31 12 +418 val_418 2008-04-08 11 +418 val_418 2008-04-08 12 +418 val_418 2008-12-31 11 +418 val_418 2008-12-31 12 +419 val_419 2008-04-08 11 +419 val_419 2008-04-08 12 +419 val_419 2008-12-31 11 +419 val_419 2008-12-31 12 +42 val_42 2008-04-08 11 +42 val_42 2008-04-08 11 +42 val_42 2008-04-08 12 +42 val_42 2008-04-08 12 +42 val_42 2008-12-31 11 +42 val_42 2008-12-31 11 +42 val_42 2008-12-31 12 +42 val_42 2008-12-31 12 +421 val_421 2008-04-08 11 +421 val_421 2008-04-08 12 +421 val_421 2008-12-31 11 +421 val_421 2008-12-31 12 +424 val_424 2008-04-08 11 +424 val_424 2008-04-08 11 +424 val_424 2008-04-08 12 +424 val_424 2008-04-08 12 +424 val_424 2008-12-31 11 +424 val_424 2008-12-31 11 +424 val_424 2008-12-31 12 +424 val_424 2008-12-31 12 +427 val_427 2008-04-08 11 +427 val_427 2008-04-08 12 +427 val_427 2008-12-31 11 +427 val_427 2008-12-31 12 +429 val_429 2008-04-08 11 +429 val_429 2008-04-08 11 +429 val_429 2008-04-08 12 +429 val_429 2008-04-08 12 +429 val_429 2008-12-31 11 +429 val_429 2008-12-31 11 +429 val_429 2008-12-31 12 +429 val_429 2008-12-31 12 +43 val_43 2008-04-08 11 +43 val_43 2008-04-08 12 +43 val_43 2008-12-31 11 +43 val_43 2008-12-31 12 +430 val_430 2008-04-08 11 +430 val_430 2008-04-08 11 +430 val_430 2008-04-08 11 +430 val_430 2008-04-08 12 +430 val_430 2008-04-08 12 +430 val_430 2008-04-08 12 +430 val_430 2008-12-31 11 +430 val_430 2008-12-31 11 +430 val_430 2008-12-31 11 +430 val_430 2008-12-31 12 +430 val_430 2008-12-31 12 +430 val_430 2008-12-31 12 +431 val_431 2008-04-08 11 +431 val_431 2008-04-08 11 +431 val_431 2008-04-08 11 +431 val_431 2008-04-08 12 +431 val_431 2008-04-08 12 +431 val_431 2008-04-08 12 +431 val_431 2008-12-31 11 +431 val_431 2008-12-31 11 +431 val_431 2008-12-31 11 +431 val_431 2008-12-31 12 +431 val_431 2008-12-31 12 +431 val_431 2008-12-31 12 +432 val_432 2008-04-08 11 +432 val_432 2008-04-08 12 +432 val_432 2008-12-31 11 +432 val_432 2008-12-31 12 +435 val_435 2008-04-08 11 +435 val_435 2008-04-08 12 +435 val_435 2008-12-31 11 +435 val_435 2008-12-31 12 +436 val_436 2008-04-08 11 +436 val_436 2008-04-08 12 +436 val_436 2008-12-31 11 +436 val_436 2008-12-31 12 +437 val_437 2008-04-08 11 +437 val_437 2008-04-08 12 +437 val_437 2008-12-31 11 +437 val_437 2008-12-31 12 +438 val_438 2008-04-08 11 +438 val_438 2008-04-08 11 +438 val_438 2008-04-08 11 +438 val_438 2008-04-08 12 +438 val_438 2008-04-08 12 
+438 val_438 2008-04-08 12 +438 val_438 2008-12-31 11 +438 val_438 2008-12-31 11 +438 val_438 2008-12-31 11 +438 val_438 2008-12-31 12 +438 val_438 2008-12-31 12 +438 val_438 2008-12-31 12 +439 val_439 2008-04-08 11 +439 val_439 2008-04-08 11 +439 val_439 2008-04-08 12 +439 val_439 2008-04-08 12 +439 val_439 2008-12-31 11 +439 val_439 2008-12-31 11 +439 val_439 2008-12-31 12 +439 val_439 2008-12-31 12 +44 val_44 2008-04-08 11 +44 val_44 2008-04-08 12 +44 val_44 2008-12-31 11 +44 val_44 2008-12-31 12 +443 val_443 2008-04-08 11 +443 val_443 2008-04-08 12 +443 val_443 2008-12-31 11 +443 val_443 2008-12-31 12 +444 val_444 2008-04-08 11 +444 val_444 2008-04-08 12 +444 val_444 2008-12-31 11 +444 val_444 2008-12-31 12 +446 val_446 2008-04-08 11 +446 val_446 2008-04-08 12 +446 val_446 2008-12-31 11 +446 val_446 2008-12-31 12 +448 val_448 2008-04-08 11 +448 val_448 2008-04-08 12 +448 val_448 2008-12-31 11 +448 val_448 2008-12-31 12 +449 val_449 2008-04-08 11 +449 val_449 2008-04-08 12 +449 val_449 2008-12-31 11 +449 val_449 2008-12-31 12 +452 val_452 2008-04-08 11 +452 val_452 2008-04-08 12 +452 val_452 2008-12-31 11 +452 val_452 2008-12-31 12 +453 val_453 2008-04-08 11 +453 val_453 2008-04-08 12 +453 val_453 2008-12-31 11 +453 val_453 2008-12-31 12 +454 val_454 2008-04-08 11 +454 val_454 2008-04-08 11 +454 val_454 2008-04-08 11 +454 val_454 2008-04-08 12 +454 val_454 2008-04-08 12 +454 val_454 2008-04-08 12 +454 val_454 2008-12-31 11 +454 val_454 2008-12-31 11 +454 val_454 2008-12-31 11 +454 val_454 2008-12-31 12 +454 val_454 2008-12-31 12 +454 val_454 2008-12-31 12 +455 val_455 2008-04-08 11 +455 val_455 2008-04-08 12 +455 val_455 2008-12-31 11 +455 val_455 2008-12-31 12 +457 val_457 2008-04-08 11 +457 val_457 2008-04-08 12 +457 val_457 2008-12-31 11 +457 val_457 2008-12-31 12 +458 val_458 2008-04-08 11 +458 val_458 2008-04-08 11 +458 val_458 2008-04-08 12 +458 val_458 2008-04-08 12 +458 val_458 2008-12-31 11 +458 val_458 2008-12-31 11 +458 val_458 2008-12-31 12 +458 val_458 2008-12-31 12 +459 val_459 2008-04-08 11 +459 val_459 2008-04-08 11 +459 val_459 2008-04-08 12 +459 val_459 2008-04-08 12 +459 val_459 2008-12-31 11 +459 val_459 2008-12-31 11 +459 val_459 2008-12-31 12 +459 val_459 2008-12-31 12 +460 val_460 2008-04-08 11 +460 val_460 2008-04-08 12 +460 val_460 2008-12-31 11 +460 val_460 2008-12-31 12 +462 val_462 2008-04-08 11 +462 val_462 2008-04-08 11 +462 val_462 2008-04-08 12 +462 val_462 2008-04-08 12 +462 val_462 2008-12-31 11 +462 val_462 2008-12-31 11 +462 val_462 2008-12-31 12 +462 val_462 2008-12-31 12 +463 val_463 2008-04-08 11 +463 val_463 2008-04-08 11 +463 val_463 2008-04-08 12 +463 val_463 2008-04-08 12 +463 val_463 2008-12-31 11 +463 val_463 2008-12-31 11 +463 val_463 2008-12-31 12 +463 val_463 2008-12-31 12 +466 val_466 2008-04-08 11 +466 val_466 2008-04-08 11 +466 val_466 2008-04-08 11 +466 val_466 2008-04-08 12 +466 val_466 2008-04-08 12 +466 val_466 2008-04-08 12 +466 val_466 2008-12-31 11 +466 val_466 2008-12-31 11 +466 val_466 2008-12-31 11 +466 val_466 2008-12-31 12 +466 val_466 2008-12-31 12 +466 val_466 2008-12-31 12 +467 val_467 2008-04-08 11 +467 val_467 2008-04-08 12 +467 val_467 2008-12-31 11 +467 val_467 2008-12-31 12 +468 val_468 2008-04-08 11 +468 val_468 2008-04-08 11 +468 val_468 2008-04-08 11 +468 val_468 2008-04-08 11 +468 val_468 2008-04-08 12 +468 val_468 2008-04-08 12 +468 val_468 2008-04-08 12 +468 val_468 2008-04-08 12 +468 val_468 2008-12-31 11 +468 val_468 2008-12-31 11 +468 val_468 2008-12-31 11 +468 val_468 2008-12-31 11 +468 val_468 2008-12-31 
12 +468 val_468 2008-12-31 12 +468 val_468 2008-12-31 12 +468 val_468 2008-12-31 12 +469 val_469 2008-04-08 11 +469 val_469 2008-04-08 11 +469 val_469 2008-04-08 11 +469 val_469 2008-04-08 11 +469 val_469 2008-04-08 11 +469 val_469 2008-04-08 12 +469 val_469 2008-04-08 12 +469 val_469 2008-04-08 12 +469 val_469 2008-04-08 12 +469 val_469 2008-04-08 12 +469 val_469 2008-12-31 11 +469 val_469 2008-12-31 11 +469 val_469 2008-12-31 11 +469 val_469 2008-12-31 11 +469 val_469 2008-12-31 11 +469 val_469 2008-12-31 12 +469 val_469 2008-12-31 12 +469 val_469 2008-12-31 12 +469 val_469 2008-12-31 12 +469 val_469 2008-12-31 12 +47 val_47 2008-04-08 11 +47 val_47 2008-04-08 12 +47 val_47 2008-12-31 11 +47 val_47 2008-12-31 12 +470 val_470 2008-04-08 11 +470 val_470 2008-04-08 12 +470 val_470 2008-12-31 11 +470 val_470 2008-12-31 12 +472 val_472 2008-04-08 11 +472 val_472 2008-04-08 12 +472 val_472 2008-12-31 11 +472 val_472 2008-12-31 12 +475 val_475 2008-04-08 11 +475 val_475 2008-04-08 12 +475 val_475 2008-12-31 11 +475 val_475 2008-12-31 12 +477 val_477 2008-04-08 11 +477 val_477 2008-04-08 12 +477 val_477 2008-12-31 11 +477 val_477 2008-12-31 12 +478 val_478 2008-04-08 11 +478 val_478 2008-04-08 11 +478 val_478 2008-04-08 12 +478 val_478 2008-04-08 12 +478 val_478 2008-12-31 11 +478 val_478 2008-12-31 11 +478 val_478 2008-12-31 12 +478 val_478 2008-12-31 12 +479 val_479 2008-04-08 11 +479 val_479 2008-04-08 12 +479 val_479 2008-12-31 11 +479 val_479 2008-12-31 12 +480 val_480 2008-04-08 11 +480 val_480 2008-04-08 11 +480 val_480 2008-04-08 11 +480 val_480 2008-04-08 12 +480 val_480 2008-04-08 12 +480 val_480 2008-04-08 12 +480 val_480 2008-12-31 11 +480 val_480 2008-12-31 11 +480 val_480 2008-12-31 11 +480 val_480 2008-12-31 12 +480 val_480 2008-12-31 12 +480 val_480 2008-12-31 12 +481 val_481 2008-04-08 11 +481 val_481 2008-04-08 12 +481 val_481 2008-12-31 11 +481 val_481 2008-12-31 12 +482 val_482 2008-04-08 11 +482 val_482 2008-04-08 12 +482 val_482 2008-12-31 11 +482 val_482 2008-12-31 12 +483 val_483 2008-04-08 11 +483 val_483 2008-04-08 12 +483 val_483 2008-12-31 11 +483 val_483 2008-12-31 12 +484 val_484 2008-04-08 11 +484 val_484 2008-04-08 12 +484 val_484 2008-12-31 11 +484 val_484 2008-12-31 12 +485 val_485 2008-04-08 11 +485 val_485 2008-04-08 12 +485 val_485 2008-12-31 11 +485 val_485 2008-12-31 12 +487 val_487 2008-04-08 11 +487 val_487 2008-04-08 12 +487 val_487 2008-12-31 11 +487 val_487 2008-12-31 12 +489 val_489 2008-04-08 11 +489 val_489 2008-04-08 11 +489 val_489 2008-04-08 11 +489 val_489 2008-04-08 11 +489 val_489 2008-04-08 12 +489 val_489 2008-04-08 12 +489 val_489 2008-04-08 12 +489 val_489 2008-04-08 12 +489 val_489 2008-12-31 11 +489 val_489 2008-12-31 11 +489 val_489 2008-12-31 11 +489 val_489 2008-12-31 11 +489 val_489 2008-12-31 12 +489 val_489 2008-12-31 12 +489 val_489 2008-12-31 12 +489 val_489 2008-12-31 12 +490 val_490 2008-04-08 11 +490 val_490 2008-04-08 12 +490 val_490 2008-12-31 11 +490 val_490 2008-12-31 12 +491 val_491 2008-04-08 11 +491 val_491 2008-04-08 12 +491 val_491 2008-12-31 11 +491 val_491 2008-12-31 12 +492 val_492 2008-04-08 11 +492 val_492 2008-04-08 11 +492 val_492 2008-04-08 12 +492 val_492 2008-04-08 12 +492 val_492 2008-12-31 11 +492 val_492 2008-12-31 11 +492 val_492 2008-12-31 12 +492 val_492 2008-12-31 12 +493 val_493 2008-04-08 11 +493 val_493 2008-04-08 12 +493 val_493 2008-12-31 11 +493 val_493 2008-12-31 12 +494 val_494 2008-04-08 11 +494 val_494 2008-04-08 12 +494 val_494 2008-12-31 11 +494 val_494 2008-12-31 12 +495 val_495 
2008-04-08 11 +495 val_495 2008-04-08 12 +495 val_495 2008-12-31 11 +495 val_495 2008-12-31 12 +496 val_496 2008-04-08 11 +496 val_496 2008-04-08 12 +496 val_496 2008-12-31 11 +496 val_496 2008-12-31 12 +497 val_497 2008-04-08 11 +497 val_497 2008-04-08 12 +497 val_497 2008-12-31 11 +497 val_497 2008-12-31 12 +498 val_498 2008-04-08 11 +498 val_498 2008-04-08 11 +498 val_498 2008-04-08 11 +498 val_498 2008-04-08 12 +498 val_498 2008-04-08 12 +498 val_498 2008-04-08 12 +498 val_498 2008-12-31 11 +498 val_498 2008-12-31 11 +498 val_498 2008-12-31 11 +498 val_498 2008-12-31 12 +498 val_498 2008-12-31 12 +498 val_498 2008-12-31 12 +5 val_5 2008-04-08 11 +5 val_5 2008-04-08 11 +5 val_5 2008-04-08 11 +5 val_5 2008-04-08 12 +5 val_5 2008-04-08 12 +5 val_5 2008-04-08 12 +5 val_5 2008-12-31 11 +5 val_5 2008-12-31 11 +5 val_5 2008-12-31 11 +5 val_5 2008-12-31 12 +5 val_5 2008-12-31 12 +5 val_5 2008-12-31 12 +51 val_51 2008-04-08 11 +51 val_51 2008-04-08 11 +51 val_51 2008-04-08 12 +51 val_51 2008-04-08 12 +51 val_51 2008-12-31 11 +51 val_51 2008-12-31 11 +51 val_51 2008-12-31 12 +51 val_51 2008-12-31 12 +53 val_53 2008-04-08 11 +53 val_53 2008-04-08 12 +53 val_53 2008-12-31 11 +53 val_53 2008-12-31 12 +54 val_54 2008-04-08 11 +54 val_54 2008-04-08 12 +54 val_54 2008-12-31 11 +54 val_54 2008-12-31 12 +57 val_57 2008-04-08 11 +57 val_57 2008-04-08 12 +57 val_57 2008-12-31 11 +57 val_57 2008-12-31 12 +58 val_58 2008-04-08 11 +58 val_58 2008-04-08 11 +58 val_58 2008-04-08 12 +58 val_58 2008-04-08 12 +58 val_58 2008-12-31 11 +58 val_58 2008-12-31 11 +58 val_58 2008-12-31 12 +58 val_58 2008-12-31 12 +64 val_64 2008-04-08 11 +64 val_64 2008-04-08 12 +64 val_64 2008-12-31 11 +64 val_64 2008-12-31 12 +65 val_65 2008-04-08 11 +65 val_65 2008-04-08 12 +65 val_65 2008-12-31 11 +65 val_65 2008-12-31 12 +66 val_66 2008-04-08 11 +66 val_66 2008-04-08 12 +66 val_66 2008-12-31 11 +66 val_66 2008-12-31 12 +67 val_67 2008-04-08 11 +67 val_67 2008-04-08 11 +67 val_67 2008-04-08 12 +67 val_67 2008-04-08 12 +67 val_67 2008-12-31 11 +67 val_67 2008-12-31 11 +67 val_67 2008-12-31 12 +67 val_67 2008-12-31 12 +69 val_69 2008-04-08 11 +69 val_69 2008-04-08 12 +69 val_69 2008-12-31 11 +69 val_69 2008-12-31 12 +70 val_70 2008-04-08 11 +70 val_70 2008-04-08 11 +70 val_70 2008-04-08 11 +70 val_70 2008-04-08 12 +70 val_70 2008-04-08 12 +70 val_70 2008-04-08 12 +70 val_70 2008-12-31 11 +70 val_70 2008-12-31 11 +70 val_70 2008-12-31 11 +70 val_70 2008-12-31 12 +70 val_70 2008-12-31 12 +70 val_70 2008-12-31 12 +72 val_72 2008-04-08 11 +72 val_72 2008-04-08 11 +72 val_72 2008-04-08 12 +72 val_72 2008-04-08 12 +72 val_72 2008-12-31 11 +72 val_72 2008-12-31 11 +72 val_72 2008-12-31 12 +72 val_72 2008-12-31 12 +74 val_74 2008-04-08 11 +74 val_74 2008-04-08 12 +74 val_74 2008-12-31 11 +74 val_74 2008-12-31 12 +76 val_76 2008-04-08 11 +76 val_76 2008-04-08 11 +76 val_76 2008-04-08 12 +76 val_76 2008-04-08 12 +76 val_76 2008-12-31 11 +76 val_76 2008-12-31 11 +76 val_76 2008-12-31 12 +76 val_76 2008-12-31 12 +77 val_77 2008-04-08 11 +77 val_77 2008-04-08 12 +77 val_77 2008-12-31 11 +77 val_77 2008-12-31 12 +78 val_78 2008-04-08 11 +78 val_78 2008-04-08 12 +78 val_78 2008-12-31 11 +78 val_78 2008-12-31 12 +8 val_8 2008-04-08 11 +8 val_8 2008-04-08 12 +8 val_8 2008-12-31 11 +8 val_8 2008-12-31 12 +80 val_80 2008-04-08 11 +80 val_80 2008-04-08 12 +80 val_80 2008-12-31 11 +80 val_80 2008-12-31 12 +82 val_82 2008-04-08 11 +82 val_82 2008-04-08 12 +82 val_82 2008-12-31 11 +82 val_82 2008-12-31 12 +83 val_83 2008-04-08 11 +83 val_83 2008-04-08 11 
+83	val_83	2008-04-08	12
+83	val_83	2008-04-08	12
+83	val_83	2008-12-31	11
+83	val_83	2008-12-31	11
+83	val_83	2008-12-31	12
+83	val_83	2008-12-31	12
+84	val_84	2008-04-08	11
+84	val_84	2008-04-08	11
+84	val_84	2008-04-08	12
+84	val_84	2008-04-08	12
+84	val_84	2008-12-31	11
+84	val_84	2008-12-31	11
+84	val_84	2008-12-31	12
+84	val_84	2008-12-31	12
+85	val_85	2008-04-08	11
+85	val_85	2008-04-08	12
+85	val_85	2008-12-31	11
+85	val_85	2008-12-31	12
+86	val_86	2008-04-08	11
+86	val_86	2008-04-08	12
+86	val_86	2008-12-31	11
+86	val_86	2008-12-31	12
+87	val_87	2008-04-08	11
+87	val_87	2008-04-08	12
+87	val_87	2008-12-31	11
+87	val_87	2008-12-31	12
+9	val_9	2008-04-08	11
+9	val_9	2008-04-08	12
+9	val_9	2008-12-31	11
+9	val_9	2008-12-31	12
+90	val_90	2008-04-08	11
+90	val_90	2008-04-08	11
+90	val_90	2008-04-08	11
+90	val_90	2008-04-08	12
+90	val_90	2008-04-08	12
+90	val_90	2008-04-08	12
+90	val_90	2008-12-31	11
+90	val_90	2008-12-31	11
+90	val_90	2008-12-31	11
+90	val_90	2008-12-31	12
+90	val_90	2008-12-31	12
+90	val_90	2008-12-31	12
+92	val_92	2008-04-08	11
+92	val_92	2008-04-08	12
+92	val_92	2008-12-31	11
+92	val_92	2008-12-31	12
+95	val_95	2008-04-08	11
+95	val_95	2008-04-08	11
+95	val_95	2008-04-08	12
+95	val_95	2008-04-08	12
+95	val_95	2008-12-31	11
+95	val_95	2008-12-31	11
+95	val_95	2008-12-31	12
+95	val_95	2008-12-31	12
+96	val_96	2008-04-08	11
+96	val_96	2008-04-08	12
+96	val_96	2008-12-31	11
+96	val_96	2008-12-31	12
+97	val_97	2008-04-08	11
+97	val_97	2008-04-08	11
+97	val_97	2008-04-08	12
+97	val_97	2008-04-08	12
+97	val_97	2008-12-31	11
+97	val_97	2008-12-31	11
+97	val_97	2008-12-31	12
+97	val_97	2008-12-31	12
+98	val_98	2008-04-08	11
+98	val_98	2008-04-08	11
+98	val_98	2008-04-08	12
+98	val_98	2008-04-08	12
+98	val_98	2008-12-31	11
+98	val_98	2008-12-31	11
+98	val_98	2008-12-31	12
+98	val_98	2008-12-31	12
diff --git a/ql/src/test/results/clientpositive/tez/autoColumnStats_2.q.out b/ql/src/test/results/clientpositive/tez/autoColumnStats_2.q.out
new file mode 100644
index 0000000..b3a1211
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/autoColumnStats_2.q.out
@@ -0,0 +1,1435 @@
+PREHOOK: query: drop table src_multi1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table src_multi1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table src_multi1 like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_multi1
+POSTHOOK: query: create table src_multi1 like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_multi1
+PREHOOK: query: insert into table src_multi1 select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+POSTHOOK: query: insert into table src_multi1 select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain extended select * from src_multi1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select * from src_multi1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src_multi1
+          GatherStats: false
+          Select Operator
+            expressions: key (type: string), value (type: string)
+            outputColumnNames: _col0, _col1
+            ListSink
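The hunk above opens the Tez golden file for autoColumnStats_2.q, which pins down the behaviour of the new hive.stats.column.autogather flag: with it enabled, the plain INSERT is what populates column statistics, with no separate ANALYZE TABLE ... COMPUTE STATISTICS FOR COLUMNS pass. A minimal HiveQL sketch of the scenario being tested (the .q file itself is not part of this hunk, so the session-level set is an assumption; the flag defaults to false):

    -- assumption: enable the new flag for this session (default is false)
    set hive.stats.column.autogather=true;

    drop table if exists src_multi1;
    create table src_multi1 like src;

    -- with autogather on, this insert alone should also collect column stats
    insert into table src_multi1 select * from src;

    -- expected: COLUMN_STATS_ACCURATE lists both key and value as "true"
    describe formatted src_multi1;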
+
+PREHOOK: query: describe formatted src_multi1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_multi1
+POSTHOOK: query: describe formatted src_multi1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_multi1
+# col_name	data_type	comment
+
+key	string	default
+value	string	default
+
+# Detailed Table Information
+Database:	default
+#### A masked pattern was here ####
+Retention:	0
+#### A masked pattern was here ####
+Table Type:	MANAGED_TABLE
+Table Parameters:
+	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+	numFiles	1
+	numRows	500
+	rawDataSize	5312
+	totalSize	5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library:	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat:	org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:	No
+Num Buckets:	-1
+Bucket Columns:	[]
+Sort Columns:	[]
+Storage Desc Params:
+	serialization.format	1
+PREHOOK: query: drop table a
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table a
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table b
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table b
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table a like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@a
+POSTHOOK: query: create table a like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@a
+PREHOOK: query: create table b like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@b
+POSTHOOK: query: create table b like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@b
+PREHOOK: query: from src
+insert into table a select *
+insert into table b select *
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@a
+PREHOOK: Output: default@b
+POSTHOOK: query: from src
+insert into table a select *
+insert into table b select *
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@a
+POSTHOOK: Output: default@b
+POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted a key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@a
+POSTHOOK: query: describe formatted a key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@a
+# col_name	data_type	min	max	num_nulls	distinct_count	avg_col_len	max_col_len	num_trues	num_falses	comment
+
+key	string	0	205	2.812	3	from deserializer
+PREHOOK: query: describe formatted b key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+# col_name	data_type	min	max	num_nulls	distinct_count	avg_col_len	max_col_len	num_trues	num_falses	comment
+
+key	string	0	205	2.812	3	from deserializer
+PREHOOK: query: from src
+insert overwrite table a select *
+insert into table b select *
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@a
+PREHOOK: Output: default@b
+POSTHOOK: query: from src
+insert overwrite table a select *
+insert into table b select *
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@a
+POSTHOOK: Output: default@b
+POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted a
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@a
+POSTHOOK: query: describe formatted a
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@a
+# col_name	data_type	comment
+
+key	string	default
+value	string	default
+
+# Detailed Table Information
+Database:	default
+#### A masked pattern was here ####
+Retention:	0
+#### A masked pattern was here ####
+Table Type:	MANAGED_TABLE
+Table Parameters:
+	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+	numFiles	1
+	numRows	500
+	rawDataSize	5312
+	totalSize	5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library:	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat:	org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:	No
+Num Buckets:	-1
+Bucket Columns:	[]
+Sort Columns:	[]
+Storage Desc Params:
+	serialization.format	1
+PREHOOK: query: describe formatted b
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+# col_name	data_type	comment
+
+key	string	default
+value	string	default
+
+# Detailed Table Information
+Database:	default
+#### A masked pattern was here ####
+Retention:	0
+#### A masked pattern was here ####
+Table Type:	MANAGED_TABLE
+Table Parameters:
+	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+	numFiles	2
+	numRows	1000
+	rawDataSize	10624
+	totalSize	11624
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library:	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat:	org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:	No
+Num Buckets:	-1
+Bucket Columns:	[]
+Sort Columns:	[]
+Storage Desc Params:
+	serialization.format	1
+PREHOOK: query: describe formatted b key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+# col_name	data_type	min	max	num_nulls	distinct_count	avg_col_len	max_col_len	num_trues	num_falses	comment
+
+key	string	0	205	2.812	3	from deserializer
+PREHOOK: query: describe formatted b value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+# col_name	data_type	min	max	num_nulls	distinct_count	avg_col_len	max_col_len	num_trues	num_falses	comment
+
+value	string	0	214	6.812	7	from deserializer
+PREHOOK: query: insert into table b select NULL, NULL from src limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@b
+POSTHOOK: query: insert into table b select NULL, NULL from src limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@b
+POSTHOOK: Lineage: b.key SIMPLE []
+POSTHOOK: Lineage: b.value SIMPLE [] +PREHOOK: query: describe formatted b key +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@b +POSTHOOK: query: describe formatted b key +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@b +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +key string 10 205 2.812 3 from deserializer +PREHOOK: query: describe formatted b value +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@b +POSTHOOK: query: describe formatted b value +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@b +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +value string 10 214 6.812 7 from deserializer +PREHOOK: query: insert into table b(value) select key+100000 from src limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@b +POSTHOOK: query: insert into table b(value) select key+100000 from src limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@b +POSTHOOK: Lineage: b.key SIMPLE [] +POSTHOOK: Lineage: b.value EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: describe formatted b key +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@b +POSTHOOK: query: describe formatted b key +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@b +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +key string 20 205 2.812 3 from deserializer +PREHOOK: query: describe formatted b value +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@b +POSTHOOK: query: describe formatted b value +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@b +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +value string 10 266 8.0 8 from deserializer +PREHOOK: query: drop table src_multi2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table src_multi2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table src_multi2 like src +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@src_multi2 +POSTHOOK: query: create table src_multi2 like src +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src_multi2 +PREHOOK: query: insert into table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@src1 +PREHOOK: Output: default@src_multi2 +POSTHOOK: query: insert into table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@src1 +POSTHOOK: Output: default@src_multi2 +POSTHOOK: Lineage: src_multi2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted src_multi2 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@src_multi2 +POSTHOOK: query: describe formatted src_multi2 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@src_multi2 +# col_name data_type comment + +key string default +value string default + +# Detailed Table Information +Database: default +#### A masked pattern was here 
#### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 508 + rawDataSize 5400 + totalSize 5908 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: drop table nzhang_part14 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table nzhang_part14 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table if not exists nzhang_part14 (key string) + partitioned by (value string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: create table if not exists nzhang_part14 (key string) + partitioned by (value string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@nzhang_part14 +PREHOOK: query: insert into table nzhang_part14 partition(value) +select key, value from ( + select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a + union all + select * from (select 'k2' as key, '' as value from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value from src limit 2)c +) T +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: insert into table nzhang_part14 partition(value) +select key, value from ( + select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a + union all + select * from (select 'k2' as key, '' as value from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value from src limit 2)c +) T +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@nzhang_part14@value= +POSTHOOK: Output: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: Lineage: nzhang_part14 PARTITION(value= ).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(value=__HIVE_DEFAULT_PARTITION__).key EXPRESSION [] +PREHOOK: query: explain select key from nzhang_part14 +PREHOOK: type: QUERY +POSTHOOK: query: explain select key from nzhang_part14 +POSTHOOK: type: QUERY +Plan optimized by CBO. 
+ +Stage-0 + Fetch Operator + limit:-1 + Select Operator [SEL_1] + Output:["_col0"] + TableScan [TS_0] + Output:["key"] + +PREHOOK: query: drop table src5 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table src5 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table src5 as select key, value from src limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src5 +POSTHOOK: query: create table src5 as select key, value from src limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src5 +POSTHOOK: Lineage: src5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert into table nzhang_part14 partition(value) +select key, value from src5 +PREHOOK: type: QUERY +PREHOOK: Input: default@src5 +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: insert into table nzhang_part14 partition(value) +select key, value from src5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src5 +POSTHOOK: Output: default@nzhang_part14@value=val_165 +POSTHOOK: Output: default@nzhang_part14@value=val_238 +POSTHOOK: Output: default@nzhang_part14@value=val_27 +POSTHOOK: Output: default@nzhang_part14@value=val_311 +POSTHOOK: Output: default@nzhang_part14@value=val_86 +POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_165).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_238).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_27).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_311).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_86).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: explain select key from nzhang_part14 +PREHOOK: type: QUERY +POSTHOOK: query: explain select key from nzhang_part14 +POSTHOOK: type: QUERY +Plan optimized by CBO. 
+ +Stage-0 + Fetch Operator + limit:-1 + Select Operator [SEL_1] + Output:["_col0"] + TableScan [TS_0] + Output:["key"] + +PREHOOK: query: drop table alter5 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table alter5 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@alter5 +POSTHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@alter5 +PREHOOK: query: alter table alter5 add partition (dt='a') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@alter5 +POSTHOOK: query: alter table alter5 add partition (dt='a') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@alter5 +POSTHOOK: Output: default@alter5@dt=a +PREHOOK: query: describe formatted alter5 partition (dt='a') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter5 +POSTHOOK: query: describe formatted alter5 partition (dt='a') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter5 +# col_name data_type comment + +col1 string + +# Partition Information +# col_name data_type comment + +dt string + +# Detailed Partition Information +Partition Value: [a] +Database: default +Table: alter5 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + numFiles 0 + numRows 0 + rawDataSize 0 + totalSize 0 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: insert into table alter5 partition (dt='a') select key from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@alter5@dt=a +POSTHOOK: query: insert into table alter5 partition (dt='a') select key from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@alter5@dt=a +POSTHOOK: Lineage: alter5 PARTITION(dt=a).col1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: describe formatted alter5 partition (dt='a') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter5 +POSTHOOK: query: describe formatted alter5 partition (dt='a') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter5 +# col_name data_type comment + +col1 string + +# Partition Information +# col_name data_type comment + +dt string + +# Detailed Partition Information +Partition Value: [a] +Database: default +Table: alter5 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"col1\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 500 + rawDataSize 1406 + totalSize 1906 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain select * from alter5 where dt='a' +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from alter5 where dt='a' +POSTHOOK: type: 
QUERY +Plan optimized by CBO. + +Stage-0 + Fetch Operator + limit:-1 + Select Operator [SEL_2] + Output:["_col0","_col1"] + TableScan [TS_0] + Output:["col1"] + +PREHOOK: query: drop table alter5 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@alter5 +PREHOOK: Output: default@alter5 +POSTHOOK: query: drop table alter5 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@alter5 +POSTHOOK: Output: default@alter5 +PREHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@alter5 +POSTHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@alter5 +PREHOOK: query: alter table alter5 add partition (dt='a') location 'parta' +PREHOOK: type: ALTERTABLE_ADDPARTS +#### A masked pattern was here #### +PREHOOK: Output: default@alter5 +POSTHOOK: query: alter table alter5 add partition (dt='a') location 'parta' +POSTHOOK: type: ALTERTABLE_ADDPARTS +#### A masked pattern was here #### +POSTHOOK: Output: default@alter5 +POSTHOOK: Output: default@alter5@dt=a +PREHOOK: query: describe formatted alter5 partition (dt='a') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter5 +POSTHOOK: query: describe formatted alter5 partition (dt='a') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter5 +# col_name data_type comment + +col1 string + +# Partition Information +# col_name data_type comment + +dt string + +# Detailed Partition Information +Partition Value: [a] +Database: default +Table: alter5 +#### A masked pattern was here #### +Partition Parameters: +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: insert into table alter5 partition (dt='a') select key from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@alter5@dt=a +POSTHOOK: query: insert into table alter5 partition (dt='a') select key from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@alter5@dt=a +POSTHOOK: Lineage: alter5 PARTITION(dt=a).col1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: describe formatted alter5 partition (dt='a') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter5 +POSTHOOK: query: describe formatted alter5 partition (dt='a') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter5 +# col_name data_type comment + +col1 string + +# Partition Information +# col_name data_type comment + +dt string + +# Detailed Partition Information +Partition Value: [a] +Database: default +Table: alter5 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"col1\":\"true\"}} + numFiles 1 + totalSize 1906 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain select * from alter5 
where dt='a' +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from alter5 where dt='a' +POSTHOOK: type: QUERY +Plan optimized by CBO. + +Stage-0 + Fetch Operator + limit:-1 + Select Operator [SEL_2] + Output:["_col0","_col1"] + TableScan [TS_0] + Output:["col1"] + +PREHOOK: query: drop table src_stat_part +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table src_stat_part +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table src_stat_part(key string, value string) partitioned by (partitionId int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@src_stat_part +POSTHOOK: query: create table src_stat_part(key string, value string) partitioned by (partitionId int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src_stat_part +PREHOOK: query: insert into table src_stat_part partition (partitionId=1) +select * from src1 limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +PREHOOK: Output: default@src_stat_part@partitionid=1 +POSTHOOK: query: insert into table src_stat_part partition (partitionId=1) +select * from src1 limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +POSTHOOK: Output: default@src_stat_part@partitionid=1 +POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=1).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=1).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted src_stat_part PARTITION(partitionId=1) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@src_stat_part +POSTHOOK: query: describe formatted src_stat_part PARTITION(partitionId=1) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@src_stat_part +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +partitionid int + +# Detailed Partition Information +Partition Value: [1] +Database: default +Table: src_stat_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 5 + rawDataSize 38 + totalSize 43 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: insert into table src_stat_part partition (partitionId=2) +select * from src1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +PREHOOK: Output: default@src_stat_part@partitionid=2 +POSTHOOK: query: insert into table src_stat_part partition (partitionId=2) +select * from src1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +POSTHOOK: Output: default@src_stat_part@partitionid=2 +POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted src_stat_part PARTITION(partitionId=2) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@src_stat_part +POSTHOOK: query: describe formatted src_stat_part PARTITION(partitionId=2) +POSTHOOK: 
type: DESCTABLE +POSTHOOK: Input: default@src_stat_part +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +partitionid int + +# Detailed Partition Information +Partition Value: [2] +Database: default +Table: src_stat_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 25 + rawDataSize 191 + totalSize 216 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: drop table srcbucket_mapjoin +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table srcbucket_mapjoin +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcbucket_mapjoin +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcbucket_mapjoin +PREHOOK: query: drop table tab_part +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table tab_part +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tab_part +POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tab_part +PREHOOK: query: drop table srcbucket_mapjoin_part +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table srcbucket_mapjoin_part +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcbucket_mapjoin_part +PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin +POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin +POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin 
partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: insert into table tab_part partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket_mapjoin_part +PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: Output: default@tab_part@ds=2008-04-08 +POSTHOOK: query: insert into table tab_part partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin_part +POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: Output: default@tab_part@ds=2008-04-08 +POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: describe formatted tab_part partition (ds='2008-04-08') +PREHOOK: type: DESCTABLE +PREHOOK: Input: 
default@tab_part +POSTHOOK: query: describe formatted tab_part partition (ds='2008-04-08') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@tab_part +# col_name data_type comment + +key int +value string + +# Partition Information +# col_name data_type comment + +ds string + +# Detailed Partition Information +Partition Value: [2008-04-08] +Database: default +Table: tab_part +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 4 + numRows 500 + rawDataSize 5312 + totalSize 5812 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: 4 +Bucket Columns: [key] +Sort Columns: [Order(col:key, order:1)] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tab +POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tab +PREHOOK: query: insert into table tab partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket_mapjoin +PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: Output: default@tab@ds=2008-04-08 +POSTHOOK: query: insert into table tab partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin +POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 +POSTHOOK: Output: default@tab@ds=2008-04-08 +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: describe formatted tab partition (ds='2008-04-08') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@tab +POSTHOOK: query: describe formatted tab partition (ds='2008-04-08') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@tab +# col_name data_type comment + +key int +value string + +# Partition Information +# col_name data_type comment + +ds string + +# Detailed Partition Information +Partition Value: [2008-04-08] +Database: default +Table: tab +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 2 + numRows 242 + rawDataSize 2566 + totalSize 2808 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: 2 +Bucket Columns: [key] +Sort Columns: [Order(col:key, order:1)] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: drop table nzhang_part14 +PREHOOK: type: DROPTABLE +PREHOOK: Input: 
default@nzhang_part14 +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: drop table nzhang_part14 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@nzhang_part14 +POSTHOOK: Output: default@nzhang_part14 +PREHOOK: query: create table if not exists nzhang_part14 (key string, value string) + partitioned by (ds string, hr string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: create table if not exists nzhang_part14 (key string, value string) + partitioned by (ds string, hr string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@nzhang_part14 +PREHOOK: query: describe formatted nzhang_part14 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part14 +POSTHOOK: query: describe formatted nzhang_part14 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@nzhang_part14 +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +ds string +hr string + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: insert into table nzhang_part14 partition(ds, hr) +select key, value, ds, hr from ( + select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a + union all + select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c +) T +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: insert into table nzhang_part14 partition(ds, hr) +select key, value, ds, hr from ( + select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a + union all + select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b + union all + select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c +) T +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@nzhang_part14@ds=1/hr=2 +POSTHOOK: Output: default@nzhang_part14@ds=1/hr=3 +POSTHOOK: Output: default@nzhang_part14@ds=2/hr=1 +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).value EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).value EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).value EXPRESSION [] +PREHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part14 +POSTHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@nzhang_part14 +# col_name data_type comment + +key string +value string + +# Partition Information +# 
col_name data_type comment + +ds string +hr string + +# Detailed Partition Information +Partition Value: [1, 3] +Database: default +Table: nzhang_part14 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 2 + rawDataSize 6 + totalSize 8 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr) +SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@nzhang_part14@ds=2010-03-03 +POSTHOOK: query: INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr) +SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=11 +POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=12 +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part14 +POSTHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@nzhang_part14 +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +ds string +hr string + +# Detailed Partition Information +Partition Value: [2010-03-03, 12] +Database: default +Table: nzhang_part14 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 1000 + rawDataSize 10624 + totalSize 11624 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: drop table 
nzhang_part14 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@nzhang_part14 +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: drop table nzhang_part14 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@nzhang_part14 +POSTHOOK: Output: default@nzhang_part14 +PREHOOK: query: create table if not exists nzhang_part14 (key string, value string) +partitioned by (ds string, hr string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_part14 +POSTHOOK: query: create table if not exists nzhang_part14 (key string, value string) +partitioned by (ds string, hr string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@nzhang_part14 +PREHOOK: query: INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr) +SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@nzhang_part14@ds=2010-03-03 +POSTHOOK: query: INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr) +SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=11 +POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=12 +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@nzhang_part14 +POSTHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@nzhang_part14 +# col_name data_type comment + +key string +value string + +# Partition Information +# col_name data_type comment + +ds string +hr string + +# Detailed Partition Information +Partition Value: [2010-03-03, 12] +Database: default +Table: nzhang_part14 +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"} + numFiles 1 + numRows 1000 + rawDataSize 10624 + totalSize 11624 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: drop table a +PREHOOK: type: 
+PREHOOK: query: drop table a
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@a
+PREHOOK: Output: default@a
+POSTHOOK: query: drop table a
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@a
+POSTHOOK: Output: default@a
+PREHOOK: query: create table a (key string, value string)
+partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@a
+POSTHOOK: query: create table a (key string, value string)
+partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@a
+PREHOOK: query: drop table b
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@b
+PREHOOK: Output: default@b
+POSTHOOK: query: drop table b
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@b
+POSTHOOK: Output: default@b
+PREHOOK: query: create table b (key string, value string)
+partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@b
+POSTHOOK: query: create table b (key string, value string)
+partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@b
+PREHOOK: query: drop table c
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table c
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table c (key string, value string)
+partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@c
+POSTHOOK: query: create table c (key string, value string)
+partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@c
+PREHOOK: query: FROM srcpart
+INSERT into TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10
+INSERT into TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11
+INSERT into TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@a@ds=2010-03-11
+PREHOOK: Output: default@b@ds=2010-04-11
+PREHOOK: Output: default@c@ds=2010-05-11
+POSTHOOK: query: FROM srcpart
+INSERT into TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10
+INSERT into TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11
+INSERT into TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@a@ds=2010-03-11/hr=11
+POSTHOOK: Output: default@a@ds=2010-03-11/hr=12
+POSTHOOK: Output: default@b@ds=2010-04-11/hr=12
+POSTHOOK: Output: default@c@ds=2010-05-11/hr=11
+POSTHOOK: Output: default@c@ds=2010-05-11/hr=12
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: b PARTITION(ds=2010-04-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b PARTITION(ds=2010-04-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain select key from a
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from a
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Stage-0
+  Fetch Operator
+    limit:-1
+    Select Operator [SEL_1]
+      Output:["_col0"]
+      TableScan [TS_0]
+        Output:["key"]
+
+PREHOOK: query: explain select value from b
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select value from b
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Stage-0
+  Fetch Operator
+    limit:-1
+    Select Operator [SEL_1]
+      Output:["_col0"]
+      TableScan [TS_0]
+        Output:["value"]
+
+PREHOOK: query: explain select key from b
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from b
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Stage-0
+  Fetch Operator
+    limit:-1
+    Select Operator [SEL_1]
+      Output:["_col0"]
+      TableScan [TS_0]
+        Output:["key"]
+
+PREHOOK: query: explain select value from c
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select value from c
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Stage-0
+  Fetch Operator
+    limit:-1
+    Select Operator [SEL_1]
+      Output:["_col0"]
+      TableScan [TS_0]
+        Output:["value"]
+
+PREHOOK: query: explain select key from c
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from c
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Stage-0
+  Fetch Operator
+    limit:-1
+    Select Operator [SEL_1]
+      Output:["_col0"]
+      TableScan [TS_0]
+        Output:["key"]
+
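All five plans above are fetch-only selects, so they mainly confirm that scans over the auto-analyzed tables still compile cleanly through CBO. One way to see the gathered basic stats actually being used, sketched under the assumption that the standard hive.compute.query.using.stats switch is available in this build:

    -- with BASIC_STATS accurate, a bare count(*) can be answered from
    -- metastore numRows without launching a job
    set hive.compute.query.using.stats=true;
    select count(*) from a;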