From 4c958ed75db7a4f0660d50eb11710479dd81dadd Mon Sep 17 00:00:00 2001 From: paulgb Date: Fri, 19 Nov 2010 10:55:49 -0800 Subject: [PATCH] stats piggybacking on TableScanOperator --- .../java/org/apache/hadoop/hive/conf/HiveConf.java | 3 +- conf/hive-default.xml | 6 + .../hive/ql/optimizer/BucketMapJoinOptimizer.java | 2 +- .../hadoop/hive/ql/optimizer/GenMRTableScan1.java | 2 +- .../hadoop/hive/ql/optimizer/GenMapRedUtils.java | 2 +- .../hadoop/hive/ql/optimizer/GroupByOptimizer.java | 2 +- .../SortedMergeBucketMapJoinOptimizer.java | 2 +- .../hive/ql/optimizer/pcr/PcrOpProcFactory.java | 2 +- .../hive/ql/optimizer/ppr/PartitionPruner.java | 12 +- .../hadoop/hive/ql/parse/BaseSemanticAnalyzer.java | 11 +- .../apache/hadoop/hive/ql/parse/ParseContext.java | 14 + .../apache/hadoop/hive/ql/parse/QBParseInfo.java | 4 + .../hadoop/hive/ql/parse/SemanticAnalyzer.java | 177 +++- ql/src/test/queries/clientpositive/piggyback.q | 9 + .../test/queries/clientpositive/piggyback_create.q | 11 + ql/src/test/queries/clientpositive/piggyback_gby.q | 9 + .../test/queries/clientpositive/piggyback_join.q | 11 + .../test/queries/clientpositive/piggyback_limit.q | 7 + .../test/queries/clientpositive/piggyback_part.q | 9 + .../test/queries/clientpositive/piggyback_subq.q | 7 + .../test/queries/clientpositive/piggyback_union.q | 7 + ql/src/test/results/clientpositive/piggyback.q.out | 80 ++ .../results/clientpositive/piggyback_create.q.out | 124 +++ .../results/clientpositive/piggyback_gby.q.out | 109 ++ .../results/clientpositive/piggyback_join.q.out | 148 +++ .../results/clientpositive/piggyback_limit.q.out | 48 + .../results/clientpositive/piggyback_part.q.out | 1063 ++++++++++++++++++++ .../results/clientpositive/piggyback_subq.q.out | 73 ++ .../results/clientpositive/piggyback_union.q.out | 596 +++++++++++ 29 files changed, 2504 insertions(+), 46 deletions(-) create mode 100644 ql/src/test/queries/clientpositive/piggyback.q create mode 100644 ql/src/test/queries/clientpositive/piggyback_create.q create mode 100644 ql/src/test/queries/clientpositive/piggyback_gby.q create mode 100644 ql/src/test/queries/clientpositive/piggyback_join.q create mode 100644 ql/src/test/queries/clientpositive/piggyback_limit.q create mode 100644 ql/src/test/queries/clientpositive/piggyback_part.q create mode 100644 ql/src/test/queries/clientpositive/piggyback_subq.q create mode 100644 ql/src/test/queries/clientpositive/piggyback_union.q create mode 100644 ql/src/test/results/clientpositive/piggyback.q.out create mode 100644 ql/src/test/results/clientpositive/piggyback_create.q.out create mode 100644 ql/src/test/results/clientpositive/piggyback_gby.q.out create mode 100644 ql/src/test/results/clientpositive/piggyback_join.q.out create mode 100644 ql/src/test/results/clientpositive/piggyback_limit.q.out create mode 100644 ql/src/test/results/clientpositive/piggyback_part.q.out create mode 100644 ql/src/test/results/clientpositive/piggyback_subq.q.out create mode 100644 ql/src/test/results/clientpositive/piggyback_union.q.out diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index db3c800..72a6183 100644 --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -284,7 +284,8 @@ public class HiveConf extends Configuration { HIVEOPTREDUCEDEDUPLICATION("hive.optimize.reducededuplication", true), // Statistics - HIVESTATSAUTOGATHER("hive.stats.autogather", true), + 
HIVESTATSAUTOGATHER("hive.stats.autogather", true), // autogather stats on write? + HIVESTATSAUTOGATHERREAD("hive.stats.autogather.read", false), // autogather stats on read? HIVESTATSDBCLASS("hive.stats.dbclass", "jdbc:derby"), // other options are jdbc:mysql and hbase as defined in StatsSetupConst.java HIVESTATSJDBCDRIVER("hive.stats.jdbcdriver", diff --git conf/hive-default.xml conf/hive-default.xml index 85b312f..a54dd3e 100644 --- conf/hive-default.xml +++ conf/hive-default.xml @@ -663,6 +663,12 @@ + hive.stats.autogather.read + false + A flag to gather statistics automatically during the SELECT command. + + + hive.stats.jdbcdriver org.apache.derby.jdbc.EmbeddedDriver The JDBC driver for the database that stores temporary hive statistics. diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java index 5a0344b..a22ec51 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java @@ -204,7 +204,7 @@ public class BucketMapJoinOptimizer implements Transform { prunedParts = pGraphContext.getOpToPartList().get(tso); if (prunedParts == null) { prunedParts = PartitionPruner.prune(tbl, pGraphContext.getOpToPartPruner().get(tso), pGraphContext.getConf(), alias, - pGraphContext.getPrunedPartitions()); + pGraphContext.getPrunedPartitions(), pGraphContext); pGraphContext.getOpToPartList().put(tso, prunedParts); } } catch (HiveException e) { diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java index 6162676..e008824 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java @@ -76,6 +76,7 @@ public class GenMRTableScan1 implements NodeProcessor { ctx.setCurrAliasId(currAliasId); mapCurrCtx.put(op, new GenMapRedCtx(currTask, currTopOp, currAliasId)); + currWork.setGatheringStats(true); QBParseInfo parseInfo = parseCtx.getQB().getParseInfo(); if (parseInfo.isAnalyzeCommand()) { @@ -88,7 +89,6 @@ public class GenMRTableScan1 implements NodeProcessor { Task statsTask = TaskFactory.get(statsWork, parseCtx.getConf()); currTask.addDependentTask(statsTask); ctx.getRootTasks().add(currTask); - currWork.setGatheringStats(true); // NOTE: here we should use the new partition predicate pushdown API to get a list of pruned list, // and pass it to setTaskPlan as the last parameter Set confirmedPartns = new HashSet(); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index 6fb3f5c..a004a69 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -550,7 +550,7 @@ public final class GenMapRedUtils { if (partsList == null) { partsList = PartitionPruner.prune(parseCtx.getTopToTable().get(topOp), parseCtx.getOpToPartPruner().get(topOp), opProcCtx.getConf(), - alias_id, parseCtx.getPrunedPartitions()); + alias_id, parseCtx.getPrunedPartitions(), parseCtx); parseCtx.getOpToPartList().put((TableScanOperator)topOp, partsList); } } catch (SemanticException e) { diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java index 2e8f7c1..8df5214 100644 
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java @@ -207,7 +207,7 @@ public class GroupByOptimizer implements Transform { if (partsList == null) { partsList = PartitionPruner.prune(destTable, pGraphContext .getOpToPartPruner().get(ts), pGraphContext.getConf(), table, - pGraphContext.getPrunedPartitions()); + pGraphContext.getPrunedPartitions(), pGraphContext); pGraphContext.getOpToPartList().put(ts, partsList); } } catch (HiveException e) { diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapJoinOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapJoinOptimizer.java index 6c9bdb6..ee52665 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapJoinOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapJoinOptimizer.java @@ -222,7 +222,7 @@ public class SortedMergeBucketMapJoinOptimizer implements Transform { if (prunedParts == null) { prunedParts = PartitionPruner.prune(tbl, pGraphContext .getOpToPartPruner().get(tso), pGraphContext.getConf(), alias, - pGraphContext.getPrunedPartitions()); + pGraphContext.getPrunedPartitions(), pGraphContext); pGraphContext.getOpToPartList().put(tso, prunedParts); } } catch (HiveException e) { diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrOpProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrOpProcFactory.java index 3649bda..36ae0aa 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrOpProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrOpProcFactory.java @@ -112,7 +112,7 @@ public final class PcrOpProcFactory { prunedPartList = PartitionPruner.prune(owc.getParseContext().getTopToTable().get(top), ppr_pred, owc.getParseContext().getConf(), (String) owc.getParseContext().getTopOps().keySet() - .toArray()[0], owc.getParseContext().getPrunedPartitions()); + .toArray()[0], owc.getParseContext().getPrunedPartitions(), owc.getParseContext()); if (prunedPartList != null) { owc.getParseContext().getOpToPartList().put(top, prunedPartList); } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java index 1d616be..ecf557d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java @@ -50,6 +50,8 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.tableSpec; /** * The transformation step that does partition pruning. 
@@ -151,7 +153,8 @@ public class PartitionPruner implements Transform { */ public static PrunedPartitionList prune(Table tab, ExprNodeDesc prunerExpr, HiveConf conf, String alias, - Map prunedPartitionsMap) throws HiveException { + Map prunedPartitionsMap, + ParseContext parseCtx) throws HiveException { LOG.trace("Started pruning partiton"); LOG.trace("tabname = " + tab.getTableName()); LOG.trace("prune Expression = " + prunerExpr); @@ -237,6 +240,13 @@ public class PartitionPruner implements Transform { // Now return the set of partitions ret = new PrunedPartitionList(true_parts, unkn_parts, denied_parts); prunedPartitionsMap.put(key, ret); + + List partitions = new ArrayList(); + partitions.addAll(true_parts); + partitions.addAll(unkn_parts); + tableSpec ts = new tableSpec(tab, tab.getTableName(), partitions); + parseCtx.setInputTableSpecs(alias, ts); + return ret; } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index 3bddb05..e0481c9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -583,10 +583,19 @@ public abstract class BaseSemanticAnalyzer { public static enum SpecType {TABLE_ONLY, STATIC_PARTITION, DYNAMIC_PARTITION}; public SpecType specType; + /* Constructor for a "dummy" tableSpec used for stats publishing */ + public tableSpec(Table tableHandle, String tableName, List partitions) { + this.tableName = tableName; + this.tableHandle = tableHandle; + this.partitions = partitions; + } + public tableSpec(Hive db, HiveConf conf, ASTNode ast) throws SemanticException { - assert (ast.getToken().getType() == HiveParser.TOK_TAB || ast.getToken().getType() == HiveParser.TOK_TABTYPE); + assert (ast.getToken().getType() == HiveParser.TOK_TAB + || ast.getToken().getType() == HiveParser.TOK_TABTYPE + || ast.getToken().getType() == HiveParser.TOK_TABREF) : ast.dump(); int childIndex = 0; numDynParts = 0; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java index 937a7b3..1847139 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java @@ -41,6 +41,8 @@ import org.apache.hadoop.hive.ql.plan.LoadFileDesc; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; import org.apache.hadoop.hive.ql.plan.MapJoinDesc; import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.tableSpec; +import org.apache.hadoop.hive.ql.metadata.Partition; /** * Parse Context: The current parse context. This is passed to the optimizer @@ -77,6 +79,17 @@ public class ParseContext { private Map> groupOpToInputTables; private Map prunedPartitions; + private Map aliasToInputTableSpecs; + + public void setInputTableSpecs(String key, tableSpec ts) { + aliasToInputTableSpecs.put(key, ts); + } + + public tableSpec getInputTableSpecs(String key) { + tableSpec ts = aliasToInputTableSpecs.get(key); + return ts; + } + /** * The lineage information. 
*/ @@ -163,6 +176,7 @@ public class ParseContext { this.listMapJoinOpsNoReducer = listMapJoinOpsNoReducer; hasNonPartCols = false; this.groupOpToInputTables = new HashMap>(); + this.aliasToInputTableSpecs = new HashMap(); this.groupOpToInputTables = groupOpToInputTables; this.prunedPartitions = prunedPartitions; this.opToSamplePruner = opToSamplePruner; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java index c36adc7..967a03f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java @@ -309,6 +309,10 @@ public class QBParseInfo { return destToLimit.get(dest); } + public HashMap getDestToLimit() { + return destToLimit; + } + /** * @return the outerQueryLimit */ diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index b1bd428..87c9e9c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -28,6 +28,8 @@ import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; +import java.util.LinkedList; +import java.util.Queue; import java.util.Map; import java.util.Set; import java.util.TreeSet; @@ -41,8 +43,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.JavaUtils; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -92,6 +94,7 @@ import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.metadata.VirtualColumn; import org.apache.hadoop.hive.ql.optimizer.GenMRFileSink1; import org.apache.hadoop.hive.ql.optimizer.GenMROperator; +import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx; import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext; import org.apache.hadoop.hive.ql.optimizer.GenMRRedSink1; import org.apache.hadoop.hive.ql.optimizer.GenMRRedSink2; @@ -102,12 +105,12 @@ import org.apache.hadoop.hive.ql.optimizer.GenMRUnion1; import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils; import org.apache.hadoop.hive.ql.optimizer.MapJoinFactory; import org.apache.hadoop.hive.ql.optimizer.Optimizer; -import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx; import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalContext; import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalOptimizer; import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner; import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.tableSpec.SpecType; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.tableSpec; import org.apache.hadoop.hive.ql.plan.AggregationDesc; import org.apache.hadoop.hive.ql.plan.CreateTableDesc; import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc; @@ -122,6 +125,7 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeNullDesc; import org.apache.hadoop.hive.ql.plan.ExtractDesc; import org.apache.hadoop.hive.ql.plan.FetchWork; import org.apache.hadoop.hive.ql.plan.FileSinkDesc; +import 
org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc; import org.apache.hadoop.hive.ql.plan.FilterDesc; import org.apache.hadoop.hive.ql.plan.ForwardDesc; import org.apache.hadoop.hive.ql.plan.GroupByDesc; @@ -140,27 +144,27 @@ import org.apache.hadoop.hive.ql.plan.PlanUtils; import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; import org.apache.hadoop.hive.ql.plan.ScriptDesc; import org.apache.hadoop.hive.ql.plan.SelectDesc; +import org.apache.hadoop.hive.ql.plan.StatsWork; import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.TableScanDesc; import org.apache.hadoop.hive.ql.plan.UDTFDesc; import org.apache.hadoop.hive.ql.plan.UnionDesc; -import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc; -import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.session.SessionState.ResourceType; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode; import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFHash; import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF; -import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode; import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.SerDeUtils; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.StructField; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; @@ -786,17 +790,44 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { qb.getMetaData().setSrcForAlias(alias, tab); + tableSpec ts = null; if (qb.getParseInfo().isAnalyzeCommand()) { - tableSpec ts = new tableSpec(db, conf, (ASTNode) ast.getChild(0)); - if (ts.specType == SpecType.DYNAMIC_PARTITION) { // dynamic partitions - try { - ts.partitions = db.getPartitionsByNames(ts.tableHandle, ts.partSpec); - } catch (HiveException e) { - throw new SemanticException("Cannot get partitions for " + ts.partSpec, e); + ts = new tableSpec(db, conf, (ASTNode) ast.getChild(0)); + } else { + // we are piggybacking stats tracking on a TableScanOperator + if (ast.getToken().getType() == HiveParser.TOK_CREATETABLE) { + // CREATE TABLE statement + ts = new tableSpec(db, conf, (ASTNode) ast.getChild(2).getChild(0).getChild(0)); + } else if (ast.getToken().getType() == HiveParser.TOK_QUERY) { + // SELECT query + if (ast.getChild(0).getChild(0).getType() == HiveParser.TOK_SUBQUERY) { + // If we are selecting from a subquery, don't gather stats + continue; + } else if (ast.getChild(0).getChild(0).getType() == HiveParser.TOK_JOIN) { + // If this is a join, we have to figure out which branch of the AST + // is represented by the current value of `alias`. 
+ if (ast.getChild(0).getChild(0).getChild(0).getChild(0).getText().equals(alias)) { + ts = new tableSpec(db, conf, (ASTNode) ast.getChild(0).getChild(0).getChild(0)); + } else { + ts = new tableSpec(db, conf, (ASTNode) ast.getChild(0).getChild(0).getChild(1)); + } + } else { + // Assume it's a regular SELECT query + ts = new tableSpec(db, conf, (ASTNode) ast.getChild(0).getChild(0)); } + } else { + // We should never get here + assert false; } - qb.getParseInfo().addTableSpec(alias, ts); } + if (ts.specType == SpecType.DYNAMIC_PARTITION) { // dynamic partitions + try { + ts.partitions = db.getPartitionsByNames(ts.tableHandle, ts.partSpec); + } catch (HiveException e) { + throw new SemanticException("Cannot get partitions for " + ts.partSpec, e); + } + } + qb.getParseInfo().addTableSpec(alias, ts); } LOG.info("Get metadata for subqueries"); @@ -5686,7 +5717,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { // Create the root of the operator tree TableScanDesc tsDesc = new TableScanDesc(alias, vcList); - setupStats(tsDesc, qb.getParseInfo(), tab, alias); + if (qb.getParseInfo().getTableSpec(alias) != null) { + setupStats(tsDesc, qb.getParseInfo(), tab, alias); + } top = putOpInsertMap(OperatorFactory.get(tsDesc, new RowSchema(rwsch.getColumnInfos())), rwsch); @@ -5842,29 +5875,47 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { private void setupStats(TableScanDesc tsDesc, QBParseInfo qbp, Table tab, String alias) throws SemanticException { - if (!qbp.isAnalyzeCommand()) { - tsDesc.setGatherStats(false); - } else { + if (qbp.isAnalyzeCommand()) { tsDesc.setGatherStats(true); + } else if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHERREAD) && + qbp.getDestToLimit().isEmpty()) { + // if we are autogathering stats on read and this query is NOT limited, + // we gather stats on this TableScanOperator + // TODO: is getOuterQueryLimit the right method here? + tsDesc.setGatherStats(true); + } else { + tsDesc.setGatherStats(false); + return; + } - String tblName = tab.getTableName(); - tableSpec tblSpec = qbp.getTableSpec(alias); - Map partSpec = tblSpec.getPartSpec(); + String tblName = tab.getTableName(); + tableSpec tblSpec = qbp.getTableSpec(alias); + Map partSpec = tblSpec.getPartSpec(); - if (partSpec != null) { - List cols = new ArrayList(); - cols.addAll(partSpec.keySet()); - tsDesc.setPartColumns(cols); - } + // Theoretically the key prefix could be any unique string shared + // between TableScanOperator (when publishing) and StatsTask (when aggregating). + // Here we use + // table_name + partitionSec + // as the prefix for easy of read during explain and debugging. + // Currently, partition spec can only be static partition. + String k = tblName + Path.SEPARATOR; + tsDesc.setStatsAggPrefix(k); - // Theoretically the key prefix could be any unique string shared - // between TableScanOperator (when publishing) and StatsTask (when aggregating). - // Here we use - // table_name + partitionSec - // as the prefix for easy of read during explain and debugging. - // Currently, partition spec can only be static partition.
- String k = tblName + Path.SEPARATOR; - tsDesc.setStatsAggPrefix(k); + if (!qbp.isAnalyzeCommand()) { + // Get partition names from the table handle + List partitions = tab.getPartCols(); + List partNames = new ArrayList(); + for (FieldSchema fs : partitions) { + partNames.add(fs.getName()); + } + tsDesc.setPartColumns(partNames); + } else { + // Get partition names from the partition spec + if (partSpec != null) { + List cols = new ArrayList(); + cols.addAll(partSpec.keySet()); + tsDesc.setPartColumns(cols); + } // set up WritenEntity for replication outputs.add(new WriteEntity(tab)); @@ -6089,8 +6140,55 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { } } + /** + * Add StatsTask to all MapRedTasks with a TableScanOperator in the given list + * of tasks. + * + * @param tasks a list of tasks + * @param conf a hive configuration object + * @param pctx the current parse context + */ + private void addStatsTask(List> tasks, HiveConf conf, ParseContext pctx) { + for (Task task : tasks) { + if (MapRedTask.class.isInstance(task)) { + MapRedTask mrTask = (MapRedTask) task; + MapredWork mrWork = (MapredWork) mrTask.getWork(); + + LinkedHashMap> aliasToWork = mrWork.getAliasToWork(); + + for (String key : aliasToWork.keySet()) { + Queue> opsToProcess = new LinkedList>(); + Operator op = aliasToWork.get(key); + opsToProcess.add(op); + while(!opsToProcess.isEmpty()) { + op = opsToProcess.remove(); + if (TableScanOperator.class.isInstance(op)) { + TableScanOperator tso = (TableScanOperator) op; + TableScanDesc tsd = (TableScanDesc) op.getConf(); + if (tsd.isGatherStats()) { + tableSpec ts = pctx.getInputTableSpecs(key); + StatsWork statsWork = new StatsWork(ts); + String k = key + Path.SEPARATOR; + statsWork.setAggKey(k); + Task statsTask = TaskFactory.get(statsWork, conf); + task.addDependentTask(statsTask); + } + } else { + List children = (List) op.getChildren(); + if (children != null) { + for (Node child : children) { + opsToProcess.add((Operator) child); + } + } + } + } + } + } + } + } + @SuppressWarnings("nls") - private void genMapRedTasks(QB qb) throws SemanticException { + private void genMapRedTasks(QB qb, ParseContext pctx) throws SemanticException { FetchWork fetch = null; List> mvTask = new ArrayList>(); FetchTask fetchTask = null; @@ -6129,7 +6227,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { if (partsList == null) { partsList = PartitionPruner.prune(topToTable.get(ts), opToPartPruner.get(ts), conf, (String) topOps.keySet() - .toArray()[0], prunedPartitions); + .toArray()[0], prunedPartitions, null); opToPartList.put(ts, partsList); } } catch (HiveException e) { @@ -6232,7 +6330,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { GenMRProcContext procCtx = new GenMRProcContext( conf, new HashMap, Task>(), - new ArrayList>(), getParseContext(), + new ArrayList>(), pctx, mvTask, rootTasks, new LinkedHashMap, GenMapRedCtx>(), inputs, outputs); @@ -6273,6 +6371,11 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { topNodes.addAll(topOps.values()); ogw.startWalking(topNodes, null); + if (!qb.getParseInfo().isAnalyzeCommand() && conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHERREAD)) { + // StatsTask for ANALYZE is added elsewhere + addStatsTask(rootTasks, conf, pctx); + } + // reduce sink does not have any kids - since the plan by now has been // broken up into multiple // tasks, iterate over all tasks. 
@@ -6550,7 +6653,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { // At this point we have the complete operator tree // from which we want to find the reduce operator - genMapRedTasks(qb); + genMapRedTasks(qb, pCtx); LOG.info("Completed plan generation"); diff --git ql/src/test/queries/clientpositive/piggyback.q ql/src/test/queries/clientpositive/piggyback.q new file mode 100644 index 0000000..8b902ae --- /dev/null +++ ql/src/test/queries/clientpositive/piggyback.q @@ -0,0 +1,9 @@ + +set hive.stats.autogather.read=true; + +explain select key from src1; + +select key from src1; + +desc extended src1; + diff --git ql/src/test/queries/clientpositive/piggyback_create.q ql/src/test/queries/clientpositive/piggyback_create.q new file mode 100644 index 0000000..25cd62b --- /dev/null +++ ql/src/test/queries/clientpositive/piggyback_create.q @@ -0,0 +1,11 @@ + +set hive.stats.autogather.read=true; + +explain create table my_table as select key from src1; + +create table my_table as select key from src1; + +desc extended src1; + +select * from my_table; + diff --git ql/src/test/queries/clientpositive/piggyback_gby.q ql/src/test/queries/clientpositive/piggyback_gby.q new file mode 100644 index 0000000..69f59eb --- /dev/null +++ ql/src/test/queries/clientpositive/piggyback_gby.q @@ -0,0 +1,9 @@ + +set hive.stats.autogather.read=true; + +explain select max(value) from src1 group by key; + +select max(value) from src1 group by key; + +desc extended src1; + diff --git ql/src/test/queries/clientpositive/piggyback_join.q ql/src/test/queries/clientpositive/piggyback_join.q new file mode 100644 index 0000000..24326a6 --- /dev/null +++ ql/src/test/queries/clientpositive/piggyback_join.q @@ -0,0 +1,11 @@ + +set hive.stats.autogather.read=true; + +explain select src1.key, src1.value, src.value from src1 join src on (src1.key = src.key); + +select src1.key, src1.value, src.value from src1 join src on (src1.key = src.key); + +desc extended src1; + +desc extended src; + diff --git ql/src/test/queries/clientpositive/piggyback_limit.q ql/src/test/queries/clientpositive/piggyback_limit.q new file mode 100644 index 0000000..06526d0 --- /dev/null +++ ql/src/test/queries/clientpositive/piggyback_limit.q @@ -0,0 +1,7 @@ + +set hive.stats.autogather.read=true; + +explain select key from src1 limit 4; + +select key from src1 limit 4; + diff --git ql/src/test/queries/clientpositive/piggyback_part.q ql/src/test/queries/clientpositive/piggyback_part.q new file mode 100644 index 0000000..11791a4 --- /dev/null +++ ql/src/test/queries/clientpositive/piggyback_part.q @@ -0,0 +1,9 @@ + +set hive.stats.autogather.read=true; + +explain select key from srcpart where ds = '2008-04-08'; + +select key from srcpart where ds = '2008-04-08'; + +desc extended srcpart; + diff --git ql/src/test/queries/clientpositive/piggyback_subq.q ql/src/test/queries/clientpositive/piggyback_subq.q new file mode 100644 index 0000000..cd2877a --- /dev/null +++ ql/src/test/queries/clientpositive/piggyback_subq.q @@ -0,0 +1,7 @@ + +set hive.stats.autogather.read=true; + +explain select key from (select key from src1) subq; + +select key from (select key from src1) subq; + diff --git ql/src/test/queries/clientpositive/piggyback_union.q ql/src/test/queries/clientpositive/piggyback_union.q new file mode 100644 index 0000000..72f5391 --- /dev/null +++ ql/src/test/queries/clientpositive/piggyback_union.q @@ -0,0 +1,7 @@ + +set hive.stats.autogather.read=true; + +explain select key from (select key from src1 union all select key from src) subq; + 
+select key from (select key from src1 union all select key from src) subq; + diff --git ql/src/test/results/clientpositive/piggyback.q.out ql/src/test/results/clientpositive/piggyback.q.out new file mode 100644 index 0000000..c2739a3 --- /dev/null +++ ql/src/test/results/clientpositive/piggyback.q.out @@ -0,0 +1,80 @@ +PREHOOK: query: explain select key from src1 +PREHOOK: type: QUERY +POSTHOOK: query: explain select key from src1 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF src1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Alias -> Map Operator Tree: + src1 + TableScan + alias: src1 + Select Operator + expressions: + expr: key + type: string + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + Stage: Stage-2 + Stats-Aggr Operator + + Stage: Stage-0 + Fetch Operator + limit: -1 + + +PREHOOK: query: select key from src1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +PREHOOK: Output: file:/tmp/pbutler/hive_2010-11-16_16-57-21_711_6227946490235451552/-mr-10000 +POSTHOOK: query: select key from src1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +POSTHOOK: Output: file:/tmp/pbutler/hive_2010-11-16_16-57-21_711_6227946490235451552/-mr-10000 +238 + +311 + + + +255 +278 +98 + + + +401 +150 +273 +224 +369 +66 +128 +213 +146 +406 + + + +PREHOOK: query: desc extended src1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: desc extended src1 +POSTHOOK: type: DESCTABLE +key string default +value string default + +Detailed Table Information Table(tableName:src1, dbName:default, owner:null, createTime:1289955436, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/home/pbutler/hive-git/build/ql/test/data/warehouse/src1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1289955447, numRows=25, totalSize=216}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) diff --git ql/src/test/results/clientpositive/piggyback_create.q.out ql/src/test/results/clientpositive/piggyback_create.q.out new file mode 100644 index 0000000..445195c --- /dev/null +++ ql/src/test/results/clientpositive/piggyback_create.q.out @@ -0,0 +1,124 @@ +PREHOOK: query: explain create table my_table as select key from src1 +PREHOOK: type: CREATETABLE +POSTHOOK: query: explain create table my_table as select key from src1 +POSTHOOK: type: CREATETABLE +ABSTRACT SYNTAX TREE: + (TOK_CREATETABLE my_table TOK_LIKETABLE (TOK_QUERY (TOK_FROM (TOK_TABREF src1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-4 depends on stages: Stage-1 , consists of Stage-3, Stage-2 + Stage-3 + 
Stage-0 depends on stages: Stage-3, Stage-2 + Stage-6 depends on stages: Stage-5, Stage-0 + Stage-2 + Stage-5 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Alias -> Map Operator Tree: + src1 + TableScan + alias: src1 + Select Operator + expressions: + expr: key + type: string + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + Stage: Stage-4 + Conditional Operator + + Stage: Stage-3 + Move Operator + files: + hdfs directory: true + destination: pfile:/home/pbutler/hive-git/build/ql/scratchdir/hive_2010-11-18_16-58-16_656_1240142683884832699/-ext-10001 + + Stage: Stage-0 + Move Operator + files: + hdfs directory: true + destination: pfile:///home/pbutler/hive-git/build/ql/test/data/warehouse/my_table + + Stage: Stage-6 + Create Table Operator: + Create Table + columns: key string + if not exists: false + input format: org.apache.hadoop.mapred.TextInputFormat + # buckets: -1 + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + name: my_table + isExternal: false + + Stage: Stage-2 + Map Reduce + Alias -> Map Operator Tree: + pfile:/home/pbutler/hive-git/build/ql/scratchdir/hive_2010-11-18_16-58-16_656_1240142683884832699/-ext-10002 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + Stage: Stage-5 + Stats-Aggr Operator + + +PREHOOK: query: create table my_table as select key from src1 +PREHOOK: type: CREATETABLE +PREHOOK: Input: default@src1 +POSTHOOK: query: create table my_table as select key from src1 +POSTHOOK: type: CREATETABLE +POSTHOOK: Input: default@src1 +POSTHOOK: Output: default@my_table +PREHOOK: query: desc extended src1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: desc extended src1 +POSTHOOK: type: DESCTABLE +key string default +value string default + +Detailed Table Information Table(tableName:src1, dbName:default, owner:null, createTime:1290128291, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/home/pbutler/hive-git/build/ql/test/data/warehouse/src1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1290128304, numRows=25, totalSize=216}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: select * from my_table +PREHOOK: type: QUERY +PREHOOK: Input: default@my_table +PREHOOK: Output: file:/tmp/pbutler/hive_2010-11-18_16-58-24_698_1993748336425233528/-mr-10000 +POSTHOOK: query: select * from my_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@my_table +POSTHOOK: Output: file:/tmp/pbutler/hive_2010-11-18_16-58-24_698_1993748336425233528/-mr-10000 +238 + +311 + + + +255 +278 +98 + + + +401 +150 +273 +224 +369 +66 +128 +213 +146 +406 + + + diff --git ql/src/test/results/clientpositive/piggyback_gby.q.out ql/src/test/results/clientpositive/piggyback_gby.q.out 
new file mode 100644 index 0000000..cba9876 --- /dev/null +++ ql/src/test/results/clientpositive/piggyback_gby.q.out @@ -0,0 +1,109 @@ +PREHOOK: query: explain select max(value) from src1 group by key +PREHOOK: type: QUERY +POSTHOOK: query: explain select max(value) from src1 group by key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF src1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION max (TOK_TABLE_OR_COL value)))) (TOK_GROUPBY (TOK_TABLE_OR_COL key)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Alias -> Map Operator Tree: + src1 + TableScan + alias: src1 + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: key, value + Group By Operator + aggregations: + expr: max(value) + bucketGroup: false + keys: + expr: key + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col1 + type: string + Reduce Operator Tree: + Group By Operator + aggregations: + expr: max(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: _col1 + type: string + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + Stage: Stage-2 + Stats-Aggr Operator + + Stage: Stage-0 + Fetch Operator + limit: -1 + + +PREHOOK: query: select max(value) from src1 group by key +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +PREHOOK: Output: file:/tmp/pbutler/hive_2010-11-16_17-18-00_096_7034461645595402572/-mr-10000 +POSTHOOK: query: select max(value) from src1 group by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +POSTHOOK: Output: file:/tmp/pbutler/hive_2010-11-16_17-18-00_096_7034461645595402572/-mr-10000 +val_484 + +val_146 +val_150 +val_213 + +val_238 +val_255 +val_273 +val_278 +val_311 + +val_401 +val_406 +val_66 +val_98 +PREHOOK: query: desc extended src1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: desc extended src1 +POSTHOOK: type: DESCTABLE +key string default +value string default + +Detailed Table Information Table(tableName:src1, dbName:default, owner:null, createTime:1289956673, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/home/pbutler/hive-git/build/ql/test/data/warehouse/src1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1289956687, numRows=25, totalSize=216}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) diff --git ql/src/test/results/clientpositive/piggyback_join.q.out ql/src/test/results/clientpositive/piggyback_join.q.out new file 
mode 100644 index 0000000..dceef8d --- /dev/null +++ ql/src/test/results/clientpositive/piggyback_join.q.out @@ -0,0 +1,148 @@ +PREHOOK: query: explain select src1.key, src1.value, src.value from src1 join src on (src1.key = src.key) +PREHOOK: type: QUERY +POSTHOOK: query: explain select src1.key, src1.value, src.value from src1 join src on (src1.key = src.key) +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF src1) (TOK_TABREF src) (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-3 depends on stages: Stage-1 + Stage-4 depends on stages: Stage-1 + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Alias -> Map Operator Tree: + src + TableScan + alias: src + Reduce Output Operator + key expressions: + expr: key + type: string + sort order: + + Map-reduce partition columns: + expr: key + type: string + tag: 1 + value expressions: + expr: value + type: string + src1 + TableScan + alias: src1 + Reduce Output Operator + key expressions: + expr: key + type: string + sort order: + + Map-reduce partition columns: + expr: key + type: string + tag: 0 + value expressions: + expr: key + type: string + expr: value + type: string + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {VALUE._col0} {VALUE._col1} + 1 {VALUE._col1} + handleSkewJoin: false + outputColumnNames: _col0, _col1, _col5 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: string + expr: _col5 + type: string + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Stats-Aggr Operator + + Stage: Stage-0 + Fetch Operator + limit: -1 + + +PREHOOK: query: select src1.key, src1.value, src.value from src1 join src on (src1.key = src.key) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@src1 +PREHOOK: Output: file:/tmp/pbutler/hive_2010-11-16_17-07-25_402_4497962938795297612/-mr-10000 +POSTHOOK: query: select src1.key, src1.value, src.value from src1 join src on (src1.key = src.key) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@src1 +POSTHOOK: Output: file:/tmp/pbutler/hive_2010-11-16_17-07-25_402_4497962938795297612/-mr-10000 +128 val_128 +128 val_128 +128 val_128 +146 val_146 val_146 +146 val_146 val_146 +150 val_150 val_150 +213 val_213 val_213 +213 val_213 val_213 +224 val_224 +224 val_224 +238 val_238 val_238 +238 val_238 val_238 +255 val_255 val_255 +255 val_255 val_255 +273 val_273 val_273 +273 val_273 val_273 +273 val_273 val_273 +278 val_278 val_278 +278 val_278 val_278 +311 val_311 val_311 +311 val_311 val_311 +311 val_311 val_311 +369 val_369 +369 val_369 +369 val_369 +401 val_401 val_401 +401 val_401 val_401 +401 val_401 val_401 +401 val_401 val_401 +401 val_401 val_401 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +66 val_66 val_66 +98 val_98 val_98 +98 val_98 val_98 +PREHOOK: query: desc extended src1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: desc extended 
src1 +POSTHOOK: type: DESCTABLE +key string default +value string default + +Detailed Table Information Table(tableName:src1, dbName:default, owner:null, createTime:1289956040, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/home/pbutler/hive-git/build/ql/test/data/warehouse/src1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1289956053, numRows=25, totalSize=216}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: desc extended src +PREHOOK: type: DESCTABLE +POSTHOOK: query: desc extended src +POSTHOOK: type: DESCTABLE +key string default +value string default + +Detailed Table Information Table(tableName:src, dbName:default, owner:null, createTime:1289956040, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/home/pbutler/hive-git/build/ql/test/data/warehouse/src, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1289956053, numRows=500, totalSize=5812}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) diff --git ql/src/test/results/clientpositive/piggyback_limit.q.out ql/src/test/results/clientpositive/piggyback_limit.q.out new file mode 100644 index 0000000..9a4a212 --- /dev/null +++ ql/src/test/results/clientpositive/piggyback_limit.q.out @@ -0,0 +1,48 @@ +PREHOOK: query: explain select key from src1 limit 4 +PREHOOK: type: QUERY +POSTHOOK: query: explain select key from src1 limit 4 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF src1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key))) (TOK_LIMIT 4))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Alias -> Map Operator Tree: + src1 + TableScan + alias: src1 + Select Operator + expressions: + expr: key + type: string + outputColumnNames: _col0 + Limit + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + Stage: Stage-0 + Fetch Operator + limit: 4 + + +PREHOOK: query: select key from src1 limit 4 +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +PREHOOK: Output: file:/tmp/pbutler/hive_2010-11-19_10-08-43_240_356955432043033555/-mr-10000 +POSTHOOK: query: select key from src1 limit 4 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +POSTHOOK: Output: file:/tmp/pbutler/hive_2010-11-19_10-08-43_240_356955432043033555/-mr-10000 +238 + +311 + diff --git 
ql/src/test/results/clientpositive/piggyback_part.q.out ql/src/test/results/clientpositive/piggyback_part.q.out new file mode 100644 index 0000000..2768a32 --- /dev/null +++ ql/src/test/results/clientpositive/piggyback_part.q.out @@ -0,0 +1,1063 @@ +PREHOOK: query: explain select key from srcpart where ds = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: explain select key from srcpart where ds = '2008-04-08' +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF srcpart)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key))) (TOK_WHERE (= (TOK_TABLE_OR_COL ds) '2008-04-08')))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Alias -> Map Operator Tree: + srcpart + TableScan + alias: srcpart + Filter Operator + predicate: + expr: (ds = '2008-04-08') + type: boolean + Select Operator + expressions: + expr: key + type: string + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + Stage: Stage-2 + Stats-Aggr Operator + + Stage: Stage-0 + Fetch Operator + limit: -1 + + +PREHOOK: query: select key from srcpart where ds = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Output: file:/tmp/pbutler/hive_2010-11-18_16-34-17_155_7807013078801717813/-mr-10000 +POSTHOOK: query: select key from srcpart where ds = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Output: file:/tmp/pbutler/hive_2010-11-18_16-34-17_155_7807013078801717813/-mr-10000 +238 +86 +311 +27 +165 +409 +255 +278 +98 +484 +265 +193 +401 +150 +273 +224 +369 +66 +128 +213 +146 +406 +429 +374 +152 +469 +145 +495 +37 +327 +281 +277 +209 +15 +82 +403 +166 +417 +430 +252 +292 +219 +287 +153 +193 +338 +446 +459 +394 +237 +482 +174 +413 +494 +207 +199 +466 +208 +174 +399 +396 +247 +417 +489 +162 +377 +397 +309 +365 +266 +439 +342 +367 +325 +167 +195 +475 +17 +113 +155 +203 +339 +0 +455 +128 +311 +316 +57 +302 +205 +149 +438 +345 +129 +170 +20 +489 +157 +378 +221 +92 +111 +47 +72 +4 +280 +35 +427 +277 +208 +356 +399 +169 +382 +498 +125 +386 +437 +469 +192 +286 +187 +176 +54 +459 +51 +138 +103 +239 +213 +216 +430 +278 +176 +289 +221 +65 +318 +332 +311 +275 +137 +241 +83 +333 +180 +284 +12 +230 +181 +67 +260 +404 +384 +489 +353 +373 +272 +138 +217 +84 +348 +466 +58 +8 +411 +230 +208 +348 +24 +463 +431 +179 +172 +42 +129 +158 +119 +496 +0 +322 +197 +468 +393 +454 +100 +298 +199 +191 +418 +96 +26 +165 +327 +230 +205 +120 +131 +51 +404 +43 +436 +156 +469 +468 +308 +95 +196 +288 +481 +457 +98 +282 +197 +187 +318 +318 +409 +470 +137 +369 +316 +169 +413 +85 +77 +0 +490 +87 +364 +179 +118 +134 +395 +282 +138 +238 +419 +15 +118 +72 +90 +307 +19 +435 +10 +277 +273 +306 +224 +309 +389 +327 +242 +369 +392 +272 +331 +401 +242 +452 +177 +226 +5 +497 +402 +396 +317 +395 +58 +35 +336 +95 +11 +168 +34 +229 +233 +143 +472 +322 +498 +160 +195 +42 +321 +430 +119 +489 +458 +78 +76 +41 +223 +492 +149 +449 +218 +228 +138 +453 +30 +209 +64 +468 +76 +74 +342 +69 +230 +33 +368 +103 +296 +113 +216 +367 +344 +167 +274 +219 +239 +485 +116 +223 +256 +263 +70 +487 +480 +401 +288 +191 +5 +244 +438 +128 +467 
+432 +202 +316 +229 +469 +463 +280 +2 +35 +283 +331 +235 +80 +44 +193 +321 +335 +104 +466 +366 +175 +403 +483 +53 +105 +257 +406 +409 +190 +406 +401 +114 +258 +90 +203 +262 +348 +424 +12 +396 +201 +217 +164 +431 +454 +478 +298 +125 +431 +164 +424 +187 +382 +5 +70 +397 +480 +291 +24 +351 +255 +104 +70 +163 +438 +119 +414 +200 +491 +237 +439 +360 +248 +479 +305 +417 +199 +444 +120 +429 +169 +443 +323 +325 +277 +230 +478 +178 +468 +310 +317 +333 +493 +460 +207 +249 +265 +480 +83 +136 +353 +172 +214 +462 +233 +406 +133 +175 +189 +454 +375 +401 +421 +407 +384 +256 +26 +134 +67 +384 +379 +18 +462 +492 +100 +298 +9 +341 +498 +146 +458 +362 +186 +285 +348 +167 +18 +273 +183 +281 +344 +97 +469 +315 +84 +28 +37 +448 +152 +348 +307 +194 +414 +477 +222 +126 +90 +169 +403 +400 +200 +97 +238 +86 +311 +27 +165 +409 +255 +278 +98 +484 +265 +193 +401 +150 +273 +224 +369 +66 +128 +213 +146 +406 +429 +374 +152 +469 +145 +495 +37 +327 +281 +277 +209 +15 +82 +403 +166 +417 +430 +252 +292 +219 +287 +153 +193 +338 +446 +459 +394 +237 +482 +174 +413 +494 +207 +199 +466 +208 +174 +399 +396 +247 +417 +489 +162 +377 +397 +309 +365 +266 +439 +342 +367 +325 +167 +195 +475 +17 +113 +155 +203 +339 +0 +455 +128 +311 +316 +57 +302 +205 +149 +438 +345 +129 +170 +20 +489 +157 +378 +221 +92 +111 +47 +72 +4 +280 +35 +427 +277 +208 +356 +399 +169 +382 +498 +125 +386 +437 +469 +192 +286 +187 +176 +54 +459 +51 +138 +103 +239 +213 +216 +430 +278 +176 +289 +221 +65 +318 +332 +311 +275 +137 +241 +83 +333 +180 +284 +12 +230 +181 +67 +260 +404 +384 +489 +353 +373 +272 +138 +217 +84 +348 +466 +58 +8 +411 +230 +208 +348 +24 +463 +431 +179 +172 +42 +129 +158 +119 +496 +0 +322 +197 +468 +393 +454 +100 +298 +199 +191 +418 +96 +26 +165 +327 +230 +205 +120 +131 +51 +404 +43 +436 +156 +469 +468 +308 +95 +196 +288 +481 +457 +98 +282 +197 +187 +318 +318 +409 +470 +137 +369 +316 +169 +413 +85 +77 +0 +490 +87 +364 +179 +118 +134 +395 +282 +138 +238 +419 +15 +118 +72 +90 +307 +19 +435 +10 +277 +273 +306 +224 +309 +389 +327 +242 +369 +392 +272 +331 +401 +242 +452 +177 +226 +5 +497 +402 +396 +317 +395 +58 +35 +336 +95 +11 +168 +34 +229 +233 +143 +472 +322 +498 +160 +195 +42 +321 +430 +119 +489 +458 +78 +76 +41 +223 +492 +149 +449 +218 +228 +138 +453 +30 +209 +64 +468 +76 +74 +342 +69 +230 +33 +368 +103 +296 +113 +216 +367 +344 +167 +274 +219 +239 +485 +116 +223 +256 +263 +70 +487 +480 +401 +288 +191 +5 +244 +438 +128 +467 +432 +202 +316 +229 +469 +463 +280 +2 +35 +283 +331 +235 +80 +44 +193 +321 +335 +104 +466 +366 +175 +403 +483 +53 +105 +257 +406 +409 +190 +406 +401 +114 +258 +90 +203 +262 +348 +424 +12 +396 +201 +217 +164 +431 +454 +478 +298 +125 +431 +164 +424 +187 +382 +5 +70 +397 +480 +291 +24 +351 +255 +104 +70 +163 +438 +119 +414 +200 +491 +237 +439 +360 +248 +479 +305 +417 +199 +444 +120 +429 +169 +443 +323 +325 +277 +230 +478 +178 +468 +310 +317 +333 +493 +460 +207 +249 +265 +480 +83 +136 +353 +172 +214 +462 +233 +406 +133 +175 +189 +454 +375 +401 +421 +407 +384 +256 +26 +134 +67 +384 +379 +18 +462 +492 +100 +298 +9 +341 +498 +146 +458 +362 +186 +285 +348 +167 +18 +273 +183 +281 +344 +97 +469 +315 +84 +28 +37 +448 +152 +348 +307 +194 +414 +477 +222 +126 +90 +169 +403 +400 +200 +97 +PREHOOK: query: desc extended srcpart +PREHOOK: type: DESCTABLE +POSTHOOK: query: desc extended srcpart +POSTHOOK: type: DESCTABLE +key string default +value string default +ds string +hr string + +Detailed Table Information Table(tableName:srcpart, dbName:default, owner:null, createTime:1290126840, lastAccessTime:0, retention:0, 
sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/home/pbutler/hive-git/build/ql/test/data/warehouse/srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=2, numFiles=2, transient_lastDdlTime=1290126867, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) diff --git ql/src/test/results/clientpositive/piggyback_subq.q.out ql/src/test/results/clientpositive/piggyback_subq.q.out new file mode 100644 index 0000000..ef6a518 --- /dev/null +++ ql/src/test/results/clientpositive/piggyback_subq.q.out @@ -0,0 +1,73 @@ +PREHOOK: query: explain select key from (select key from src1) subq +PREHOOK: type: QUERY +POSTHOOK: query: explain select key from (select key from src1) subq +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF src1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key))))) subq)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Alias -> Map Operator Tree: + subq:src1 + TableScan + alias: src1 + Select Operator + expressions: + expr: key + type: string + outputColumnNames: _col0 + Select Operator + expressions: + expr: _col0 + type: string + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + Stage: Stage-0 + Fetch Operator + limit: -1 + + +PREHOOK: query: select key from (select key from src1) subq +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +PREHOOK: Output: file:/tmp/pbutler/hive_2010-11-18_16-41-46_470_2888583644987921797/-mr-10000 +POSTHOOK: query: select key from (select key from src1) subq +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +POSTHOOK: Output: file:/tmp/pbutler/hive_2010-11-18_16-41-46_470_2888583644987921797/-mr-10000 +238 + +311 + + + +255 +278 +98 + + + +401 +150 +273 +224 +369 +66 +128 +213 +146 +406 + + + diff --git ql/src/test/results/clientpositive/piggyback_union.q.out ql/src/test/results/clientpositive/piggyback_union.q.out new file mode 100644 index 0000000..fc345fd --- /dev/null +++ ql/src/test/results/clientpositive/piggyback_union.q.out @@ -0,0 +1,596 @@ +PREHOOK: query: explain select key from (select key from src1 union all select key from src) subq +PREHOOK: type: QUERY +POSTHOOK: query: explain select key from (select key from src1 union all select key from src) subq +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_UNION (TOK_QUERY (TOK_FROM (TOK_TABREF src1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL 
key))))) (TOK_QUERY (TOK_FROM (TOK_TABREF src)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)))))) subq)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Alias -> Map Operator Tree: + null-subquery1:subq-subquery1:src1 + TableScan + alias: src1 + Select Operator + expressions: + expr: key + type: string + outputColumnNames: _col0 + Union + Select Operator + expressions: + expr: _col0 + type: string + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + null-subquery2:subq-subquery2:src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + outputColumnNames: _col0 + Union + Select Operator + expressions: + expr: _col0 + type: string + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + Stage: Stage-0 + Fetch Operator + limit: -1 + + +PREHOOK: query: select key from (select key from src1 union all select key from src) subq +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@src1 +PREHOOK: Output: file:/tmp/pbutler/hive_2010-11-18_16-51-04_007_3527823884216367843/-mr-10000 +POSTHOOK: query: select key from (select key from src1 union all select key from src) subq +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@src1 +POSTHOOK: Output: file:/tmp/pbutler/hive_2010-11-18_16-51-04_007_3527823884216367843/-mr-10000 +238 + +311 + + + +255 +278 +98 + + + +401 +150 +273 +224 +369 +66 +128 +213 +146 +406 + + + +238 +86 +311 +27 +165 +409 +255 +278 +98 +484 +265 +193 +401 +150 +273 +224 +369 +66 +128 +213 +146 +406 +429 +374 +152 +469 +145 +495 +37 +327 +281 +277 +209 +15 +82 +403 +166 +417 +430 +252 +292 +219 +287 +153 +193 +338 +446 +459 +394 +237 +482 +174 +413 +494 +207 +199 +466 +208 +174 +399 +396 +247 +417 +489 +162 +377 +397 +309 +365 +266 +439 +342 +367 +325 +167 +195 +475 +17 +113 +155 +203 +339 +0 +455 +128 +311 +316 +57 +302 +205 +149 +438 +345 +129 +170 +20 +489 +157 +378 +221 +92 +111 +47 +72 +4 +280 +35 +427 +277 +208 +356 +399 +169 +382 +498 +125 +386 +437 +469 +192 +286 +187 +176 +54 +459 +51 +138 +103 +239 +213 +216 +430 +278 +176 +289 +221 +65 +318 +332 +311 +275 +137 +241 +83 +333 +180 +284 +12 +230 +181 +67 +260 +404 +384 +489 +353 +373 +272 +138 +217 +84 +348 +466 +58 +8 +411 +230 +208 +348 +24 +463 +431 +179 +172 +42 +129 +158 +119 +496 +0 +322 +197 +468 +393 +454 +100 +298 +199 +191 +418 +96 +26 +165 +327 +230 +205 +120 +131 +51 +404 +43 +436 +156 +469 +468 +308 +95 +196 +288 +481 +457 +98 +282 +197 +187 +318 +318 +409 +470 +137 +369 +316 +169 +413 +85 +77 +0 +490 +87 +364 +179 +118 +134 +395 +282 +138 +238 +419 +15 +118 +72 +90 +307 +19 +435 +10 +277 +273 +306 +224 +309 +389 +327 +242 +369 +392 +272 +331 +401 +242 +452 +177 +226 +5 +497 +402 +396 +317 +395 +58 +35 +336 +95 +11 +168 +34 +229 +233 +143 +472 +322 +498 +160 +195 +42 +321 +430 +119 +489 +458 +78 +76 +41 +223 +492 +149 +449 +218 +228 +138 +453 +30 +209 +64 +468 +76 +74 +342 +69 +230 +33 +368 +103 +296 +113 +216 +367 +344 +167 +274 +219 +239 +485 
+116 +223 +256 +263 +70 +487 +480 +401 +288 +191 +5 +244 +438 +128 +467 +432 +202 +316 +229 +469 +463 +280 +2 +35 +283 +331 +235 +80 +44 +193 +321 +335 +104 +466 +366 +175 +403 +483 +53 +105 +257 +406 +409 +190 +406 +401 +114 +258 +90 +203 +262 +348 +424 +12 +396 +201 +217 +164 +431 +454 +478 +298 +125 +431 +164 +424 +187 +382 +5 +70 +397 +480 +291 +24 +351 +255 +104 +70 +163 +438 +119 +414 +200 +491 +237 +439 +360 +248 +479 +305 +417 +199 +444 +120 +429 +169 +443 +323 +325 +277 +230 +478 +178 +468 +310 +317 +333 +493 +460 +207 +249 +265 +480 +83 +136 +353 +172 +214 +462 +233 +406 +133 +175 +189 +454 +375 +401 +421 +407 +384 +256 +26 +134 +67 +384 +379 +18 +462 +492 +100 +298 +9 +341 +498 +146 +458 +362 +186 +285 +348 +167 +18 +273 +183 +281 +344 +97 +469 +315 +84 +28 +37 +448 +152 +348 +307 +194 +414 +477 +222 +126 +90 +169 +403 +400 +200 +97 -- 1.6.6.5.g743753
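
The hive.stats.autogather.read flag introduced above is consulted in SemanticAnalyzer.setupStats() through HiveConf.ConfVars.HIVESTATSAUTOGATHERREAD. A minimal sketch of flipping and reading the flag programmatically, assuming this patch is applied; the demo class name is invented, while setBoolVar/getBoolVar are existing HiveConf methods:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

    public class ReadSideStatsFlagDemo {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf(ReadSideStatsFlagDemo.class);
        // same effect as "set hive.stats.autogather.read=true;" in the .q tests above
        conf.setBoolVar(ConfVars.HIVESTATSAUTOGATHERREAD, true);
        // the property defaults to false, so read-side gathering is opt-in
        System.out.println(conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHERREAD));
      }
    }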
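
In getMetaData() the patch locates the TOK_TABREF for the alias being processed by walking fixed child positions of the query AST. Below is a sketch of the join case with the expected node at each step spelled out; the token-layout comments reflect a reading of the Hive grammar and, like the helper name, are assumptions rather than anything the diff states:

    import org.apache.hadoop.hive.ql.parse.ASTNode;

    public class TableRefLookupSketch {
      // Mirrors the TOK_JOIN branch above: pick the left or right TOK_TABREF
      // depending on which one carries the current alias.
      static ASTNode tabRefForAlias(ASTNode ast, String alias) {
        // ast                  : TOK_QUERY
        // ast.getChild(0)      : TOK_FROM
        // ...getChild(0)       : TOK_JOIN
        // ...getChild(0|1)     : TOK_TABREF for the left / right join input
        ASTNode join = (ASTNode) ast.getChild(0).getChild(0);
        ASTNode left = (ASTNode) join.getChild(0);
        if (alias.equals(left.getChild(0).getText())) { // string comparison, not reference equality
          return left;
        }
        return (ASTNode) join.getChild(1);
      }
    }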
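
setupStats() enables gathering on read only when the query is not an ANALYZE command, the flag is on, and no LIMIT is present; the new QBParseInfo.getDestToLimit() accessor supplies the last check, which is why piggyback_limit.q above produces no Stats-Aggr stage. A condensed sketch of that gate, with the class name invented for illustration:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
    import org.apache.hadoop.hive.ql.parse.QBParseInfo;

    public class GatherStatsGateSketch {
      // ANALYZE always gathers; otherwise gather only when read-side
      // autogathering is enabled and the query has no LIMIT clause.
      static boolean shouldGather(HiveConf conf, QBParseInfo qbp) {
        if (qbp.isAnalyzeCommand()) {
          return true;
        }
        return conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHERREAD)
            && qbp.getDestToLimit().isEmpty();
      }
    }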
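
The statistics aggregation prefix set on the TableScanDesc is simply the table name followed by the path separator, so the piggyback.q run above publishes and aggregates under "src1/". A tiny illustration; Path.SEPARATOR is the Hadoop constant "/":

    import org.apache.hadoop.fs.Path;

    public class StatsPrefixDemo {
      public static void main(String[] args) {
        String tblName = "src1";                    // table being scanned
        String aggKey = tblName + Path.SEPARATOR;   // prefix shared by publisher and aggregator
        System.out.println(aggKey);                 // prints "src1/"
      }
    }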
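
For a read-side scan there is no user-supplied partition spec, so setupStats() records all of the table's partition columns on the TableScanDesc by walking the table handle instead. A small sketch of that conversion; only the class and helper names are invented:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.FieldSchema;
    import org.apache.hadoop.hive.ql.metadata.Table;

    public class PartColumnNamesSketch {
      // Partition column names come from the table handle, not from a partition spec.
      static List<String> partColumnNames(Table tab) {
        List<String> names = new ArrayList<String>();
        for (FieldSchema fs : tab.getPartCols()) {
          names.add(fs.getName());
        }
        return names;
      }
    }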
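
The diff above shows the new ParseContext bookkeeping with its generic type parameters stripped (for example "private Map aliasToInputTableSpecs"). A sketch of how those members presumably read with the generics restored; keying a Map<String, tableSpec> by alias is an inference from how PartitionPruner and addStatsTask use it, not something the diff spells out:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.tableSpec;

    public class AliasToTableSpecSketch {
      private final Map<String, tableSpec> aliasToInputTableSpecs =
          new HashMap<String, tableSpec>();

      public void setInputTableSpecs(String alias, tableSpec ts) {
        aliasToInputTableSpecs.put(alias, ts);
      }

      public tableSpec getInputTableSpecs(String alias) {
        return aliasToInputTableSpecs.get(alias);
      }
    }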
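
PartitionPruner.prune() now builds a "dummy" tableSpec for each pruned alias, combining the partitions that matched the pruning predicate with those that could not be ruled out, and registers it on the ParseContext so addStatsTask() can look it up later. A sketch of that hook, assuming the patch is applied; the Set<Partition> and List<Partition> element types are assumptions since the diff lost the generics:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Set;
    import org.apache.hadoop.hive.ql.metadata.Partition;
    import org.apache.hadoop.hive.ql.metadata.Table;
    import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.tableSpec;
    import org.apache.hadoop.hive.ql.parse.ParseContext;

    public class PrunerStatsHookSketch {
      static void registerInputSpec(ParseContext parseCtx, Table tab, String alias,
          Set<Partition> trueParts, Set<Partition> unknParts) {
        // confirmed partitions plus partitions the pruner could not exclude
        List<Partition> partitions = new ArrayList<Partition>(trueParts);
        partitions.addAll(unknParts);
        tableSpec ts = new tableSpec(tab, tab.getTableName(), partitions);
        parseCtx.setInputTableSpecs(alias, ts);  // keyed by alias, read back in addStatsTask()
      }
    }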
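
addStatsTask() walks each MapRedTask's alias-to-operator map breadth-first and, for every TableScanOperator with stats gathering enabled, attaches a StatsTask whose aggregation key is the alias plus the path separator. The sketch below restores plausible generics (the wildcard types are assumptions) and uses getChildOperators() instead of the raw getChildren()/cast combination in the diff, purely for type clarity:

    import java.io.Serializable;
    import java.util.LinkedList;
    import java.util.List;
    import java.util.Map;
    import java.util.Queue;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.exec.Operator;
    import org.apache.hadoop.hive.ql.exec.TableScanOperator;
    import org.apache.hadoop.hive.ql.plan.MapredWork;
    import org.apache.hadoop.hive.ql.plan.TableScanDesc;

    public class TableScanWalkSketch {
      static void visitTableScans(MapredWork mrWork) {
        Map<String, Operator<? extends Serializable>> aliasToWork = mrWork.getAliasToWork();
        for (Map.Entry<String, Operator<? extends Serializable>> e : aliasToWork.entrySet()) {
          Queue<Operator<? extends Serializable>> queue =
              new LinkedList<Operator<? extends Serializable>>();
          queue.add(e.getValue());
          while (!queue.isEmpty()) {
            Operator<? extends Serializable> op = queue.remove();
            if (op instanceof TableScanOperator) {
              TableScanDesc tsd = (TableScanDesc) op.getConf();
              if (tsd.isGatherStats()) {
                String aggKey = e.getKey() + Path.SEPARATOR;
                // the real code wraps the alias's tableSpec in a StatsWork with this
                // aggregation key and adds the StatsTask as a dependent of the MapRedTask
                System.out.println("would attach StatsTask with key " + aggKey);
              }
              continue;  // mirrors the diff: children of a TableScanOperator are not traversed
            }
            List<Operator<? extends Serializable>> children = op.getChildOperators();
            if (children != null) {
              queue.addAll(children);
            }
          }
        }
      }
    }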