Index: ant/src/org/apache/hadoop/hive/ant/QTestGenTask.java =================================================================== --- ant/src/org/apache/hadoop/hive/ant/QTestGenTask.java (revision 1553449) +++ ant/src/org/apache/hadoop/hive/ant/QTestGenTask.java (working copy) @@ -135,6 +135,8 @@ private String clusterMode; + private String hiveConfDir; + private String runDisabled; private String hadoopVersion; @@ -146,6 +148,14 @@ public String getHadoopVersion() { return hadoopVersion; } + + public void setHiveConfDir(String hiveConfDir) { + this.hiveConfDir = hiveConfDir; + } + + public String getHiveConfDir() { + return hiveConfDir; + } public void setClusterMode(String clusterMode) { this.clusterMode = clusterMode; @@ -418,6 +428,9 @@ if (hadoopVersion == null) { hadoopVersion = ""; } + if (hiveConfDir == null) { + hiveConfDir = ""; + } // For each of the qFiles generate the test VelocityContext ctx = new VelocityContext(); @@ -429,6 +442,7 @@ ctx.put("resultsDir", relativePath(hiveRootDir, resultsDir)); ctx.put("logDir", relativePath(hiveRootDir, logDir)); ctx.put("clusterMode", clusterMode); + ctx.put("hiveConfDir", hiveConfDir); ctx.put("hadoopVersion", hadoopVersion); File outFile = new File(outDir, className + ".java"); Index: cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java =================================================================== --- cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java (revision 1553449) +++ cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java (working copy) @@ -27,12 +27,12 @@ import java.io.IOException; import java.io.PrintStream; import java.io.UnsupportedEncodingException; +import java.sql.SQLException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Set; -import java.sql.SQLException; import jline.ArgumentCompletor; import jline.ArgumentCompletor.AbstractArgumentDelimiter; @@ -271,10 +271,15 @@ return ret; } + // query has run capture the time + long end = System.currentTimeMillis(); + double timeTaken = (end - start) / 1000.0; + ArrayList res = new ArrayList(); printHeader(qp, out); + // print the results int counter = 0; try { while (qp.getResults(res)) { @@ -299,11 +304,8 @@ ret = cret; } - long end = System.currentTimeMillis(); - double timeTaken = (end - start) / 1000.0; console.printInfo("Time taken: " + timeTaken + " seconds" + (counter == 0 ? 
"" : ", Fetched: " + counter + " row(s)")); - } else { String firstToken = tokenizeCmd(cmd.trim())[0]; String cmd_1 = getFirstCmd(cmd.trim(), firstToken.length()); Index: common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java =================================================================== --- common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java (revision 1553449) +++ common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java (working copy) @@ -47,6 +47,9 @@ public String getPublisher(Configuration conf) { return "org.apache.hadoop.hive.ql.stats.CounterStatsPublisher"; } public String getAggregator(Configuration conf) { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_OPTIMIZE_TEZ)) { + return "org.apache.hadoop.hive.ql.stats.CounterStatsAggregatorTez"; + } return "org.apache.hadoop.hive.ql.stats.CounterStatsAggregator"; } }, custom { Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java =================================================================== --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (revision 1553449) +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (working copy) @@ -517,6 +517,10 @@ HIVE_ORC_DICTIONARY_KEY_SIZE_THRESHOLD("hive.exec.orc.dictionary.key.size.threshold", 0.8f), + HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS("hive.orc.splits.include.file.footer", false), + HIVE_ORC_CACHE_STRIPE_DETAILS_SIZE("hive.orc.cache.stripe.details.size", 10000), + HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS("hive.orc.compute.splits.num.threads", 10), + HIVESKEWJOIN("hive.optimize.skewjoin", false), HIVECONVERTJOIN("hive.auto.convert.join", true), HIVECONVERTJOINNOCONDITIONALTASK("hive.auto.convert.join.noconditionaltask", true), @@ -854,7 +858,11 @@ // Whether to show the unquoted partition names in query results. HIVE_DECODE_PARTITION_NAME("hive.decode.partition.name", false), - //Vectorization enabled + HIVE_OPTIMIZE_TEZ("hive.optimize.tez", false), + HIVE_JAR_DIRECTORY("hive.jar.directory", "hdfs:///user/hive/"), + HIVE_USER_INSTALL_DIR("hive.user.install.directory", "hdfs:///user/"), + + // Vectorization enabled HIVE_VECTORIZATION_ENABLED("hive.vectorized.execution.enabled", false), HIVE_VECTORIZATION_GROUPBY_CHECKINTERVAL("hive.vectorized.groupby.checkinterval", 100000), HIVE_VECTORIZATION_GROUPBY_MAXENTRIES("hive.vectorized.groupby.maxentries", 1000000), @@ -863,6 +871,12 @@ HIVE_TYPE_CHECK_ON_INSERT("hive.typecheck.on.insert", true), + // Whether to send the query plan via local resource or RPC + HIVE_RPC_QUERY_PLAN("hive.rpc.query.plan", false), + + // Whether to generate the splits locally or in the AM (tez only) + HIVE_AM_SPLIT_GENERATION("hive.compute.splits.in.am", true), + // none, idonly, traverse, execution HIVESTAGEIDREARRANGE("hive.stageid.rearrange", "none"), HIVEEXPLAINDEPENDENCYAPPENDTASKTYPES("hive.explain.dependency.append.tasktype", false), @@ -1332,7 +1346,11 @@ return hiveDefaultURL; } - public URL getHiveSiteLocation() { + public static void setHiveSiteLocation(URL location) { + hiveSiteURL = location; + } + + public static URL getHiveSiteLocation() { return hiveSiteURL; } Index: conf/hive-default.xml.template =================================================================== --- conf/hive-default.xml.template (revision 1553449) +++ conf/hive-default.xml.template (working copy) @@ -2025,6 +2025,15 @@ + hive.optimize.tez + false + + Setting this property turns on Tez execution. Needs tez installed on the + cluster. 
(Only available on hadoop 2) + + + + hive.server2.table.type.mapping CLASSIC Index: data/conf/tez/hive-site.xml =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/xml Index: data/conf/tez/hive-site.xml =================================================================== --- data/conf/tez/hive-site.xml (revision 0) +++ data/conf/tez/hive-site.xml (working copy) Property changes on: data/conf/tez/hive-site.xml ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/xml \ No newline at end of property Index: hbase-handler/src/test/templates/TestHBaseCliDriver.vm =================================================================== --- hbase-handler/src/test/templates/TestHBaseCliDriver.vm (revision 1553449) +++ hbase-handler/src/test/templates/TestHBaseCliDriver.vm (working copy) @@ -24,6 +24,7 @@ import java.io.*; import java.util.*; +import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType; import org.apache.hadoop.hive.hbase.HBaseQTestUtil; import org.apache.hadoop.hive.hbase.HBaseTestSetup; import org.apache.hadoop.hive.ql.session.SessionState; @@ -42,10 +43,11 @@ @Override protected void setUp() { + + MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode"); + try { - boolean miniMR = "$clusterMode".equals("miniMR"); qt = new HBaseQTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, setup); - } catch (Exception e) { System.err.println("Exception: " + e.getMessage()); e.printStackTrace(); Index: hbase-handler/src/test/templates/TestHBaseNegativeCliDriver.vm =================================================================== --- hbase-handler/src/test/templates/TestHBaseNegativeCliDriver.vm (revision 1553449) +++ hbase-handler/src/test/templates/TestHBaseNegativeCliDriver.vm (working copy) @@ -25,6 +25,7 @@ import java.io.*; import java.util.*; +import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType; import org.apache.hadoop.hive.hbase.HBaseQTestUtil; import org.apache.hadoop.hive.hbase.HBaseTestSetup; @@ -42,11 +43,11 @@ @Override protected void setUp() { + + MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode"); + try { - boolean miniMR = "$clusterMode".equals("miniMR"); - qt = new HBaseQTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, setup); - } catch (Exception e) { System.err.println("Exception: " + e.getMessage()); e.printStackTrace(); Index: itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java =================================================================== --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java (revision 1553449) +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java (working copy) @@ -24,6 +24,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType; + /** * Suite for testing location. e.g. if "alter table alter partition * location" is run, do the partitions end up in the correct location. 
@@ -82,7 +84,7 @@ return failedCount; } - public CheckResults(String outDir, String logDir, boolean miniMr, + public CheckResults(String outDir, String logDir, MiniClusterType miniMr, String hadoopVer, String locationSubdir) throws Exception { @@ -102,8 +104,9 @@ File[] qfiles = setupQFiles(testNames); QTestUtil[] qt = new QTestUtil[qfiles.length]; + for (int i = 0; i < qfiles.length; i++) { - qt[i] = new CheckResults(resDir, logDir, false, "0.20", "parta"); + qt[i] = new CheckResults(resDir, logDir, MiniClusterType.none, "0.20", "parta"); qt[i].addFile(qfiles[i]); qt[i].clearTestSideEffects(); } Index: itests/qtest/pom.xml =================================================================== --- itests/qtest/pom.xml (revision 1553449) +++ itests/qtest/pom.xml (working copy) @@ -38,6 +38,8 @@ false stats_counter_partitioned.q,list_bucket_dml_10.q,input16_cc.q,scriptfile1.q,scriptfile1_win.q,bucket4.q,bucketmapjoin6.q,disable_merge_for_bucketing.q,reduce_deduplicate.q,smb_mapjoin_8.q,join1.q,groupby2.q,bucketizedhiveinputformat.q,bucketmapjoin7.q,optrstat_groupby.q,bucket_num_reducers.q,bucket5.q,load_fs2.q,bucket_num_reducers2.q,infer_bucket_sort_merge.q,infer_bucket_sort_reducers_power_two.q,infer_bucket_sort_dyn_part.q,infer_bucket_sort_bucketed_table.q,infer_bucket_sort_map_operators.q,infer_bucket_sort_num_buckets.q,leftsemijoin_mr.q,schemeAuthority.q,schemeAuthority2.q,truncate_column_buckets.q,remote_script.q,,load_hdfs_file_with_space_in_the_name.q,parallel_orderby.q,import_exported_table.q,stats_counter.q,auto_sortmerge_join_16.q,quotedid_smb.q cluster_tasklog_retrieval.q,minimr_broken_pipe.q,mapreduce_stack_trace.q,mapreduce_stack_trace_turnoff.q,mapreduce_stack_trace_hadoop20.q,mapreduce_stack_trace_turnoff_hadoop20.q + tez_join_tests.q,tez_joins_explain.q,mrr.q,tez_dml.q,tez_insert_overwrite_local_directory_1.q + join0.q,join1.q,auto_join0.q,auto_join1.q,bucket2.q,bucket3.q,bucket4.q,count.q,create_merge_compressed.q,cross_join.q,ctas.q,custom_input_output_format.q,disable_merge_for_bucketing.q,enforce_order.q,filter_join_breaktask.q,filter_join_breaktask2.q,groupby1.q,groupby2.q,groupby3.q,having.q,insert1.q,insert_into1.q,insert_into2.q,leftsemijoin.q,limit_pushdown.q,load_dyn_part1.q,load_dyn_part2.q,load_dyn_part3.q,mapjoin_mapjoin.q,mapreduce1.q,mapreduce2.q,merge1.q,merge2.q,metadata_only_queries.q,sample1.q,subquery_in.q,subquery_exists.q,vectorization_15.q,ptf.q 
add_part_exist.q,alter1.q,alter2.q,alter4.q,alter5.q,alter_rename_partition.q,alter_rename_partition_authorization.q,archive.q,archive_corrupt.q,archive_multi.q,archive_mr_1806.q,archive_multi_mr_1806.q,authorization_1.q,authorization_2.q,authorization_4.q,authorization_5.q,authorization_6.q,authorization_7.q,ba_table1.q,ba_table2.q,ba_table3.q,ba_table_udfs.q,binary_table_bincolserde.q,binary_table_colserde.q,cluster.q,columnarserde_create_shortcut.q,combine2.q,constant_prop.q,create_nested_type.q,create_or_replace_view.q,create_struct_table.q,create_union_table.q,database.q,database_location.q,database_properties.q,ddltime.q,describe_database_json.q,drop_database_removes_partition_dirs.q,escape1.q,escape2.q,exim_00_nonpart_empty.q,exim_01_nonpart.q,exim_02_00_part_empty.q,exim_02_part.q,exim_03_nonpart_over_compat.q,exim_04_all_part.q,exim_04_evolved_parts.q,exim_05_some_part.q,exim_06_one_part.q,exim_07_all_part_over_nonoverlap.q,exim_08_nonpart_rename.q,exim_09_part_spec_nonoverlap.q,exim_10_external_managed.q,exim_11_managed_external.q,exim_12_external_location.q,exim_13_managed_location.q,exim_14_managed_location_over_existing.q,exim_15_external_part.q,exim_16_part_external.q,exim_17_part_managed.q,exim_18_part_external.q,exim_19_00_part_external_location.q,exim_19_part_external_location.q,exim_20_part_managed_location.q,exim_21_export_authsuccess.q,exim_22_import_exist_authsuccess.q,exim_23_import_part_authsuccess.q,exim_24_import_nonexist_authsuccess.q,global_limit.q,groupby_complex_types.q,groupby_complex_types_multi_single_reducer.q,index_auth.q,index_auto.q,index_auto_empty.q,index_bitmap.q,index_bitmap1.q,index_bitmap2.q,index_bitmap3.q,index_bitmap_auto.q,index_bitmap_rc.q,index_compact.q,index_compact_1.q,index_compact_2.q,index_compact_3.q,index_stale_partitioned.q,init_file.q,input16.q,input16_cc.q,input46.q,input_columnarserde.q,input_dynamicserde.q,input_lazyserde.q,input_testxpath3.q,input_testxpath4.q,insert2_overwrite_partitions.q,insertexternal1.q,join_thrift.q,lateral_view.q,load_binary_data.q,load_exist_part_authsuccess.q,load_nonpart_authsuccess.q,load_part_authsuccess.q,loadpart_err.q,lock1.q,lock2.q,lock3.q,lock4.q,merge_dynamic_partition.q,multi_insert.q,multi_insert_move_tasks_share_dependencies.q,null_column.q,ppd_clusterby.q,query_with_semi.q,rename_column.q,sample6.q,sample_islocalmode_hook.q,set_processor_namespaces.q,show_tables.q,source.q,split_sample.q,str_to_map.q,transform1.q,udaf_collect_set.q,udaf_context_ngrams.q,udaf_histogram_numeric.q,udaf_ngrams.q,udaf_percentile_approx.q,udf_array.q,udf_bitmap_and.q,udf_bitmap_or.q,udf_explode.q,udf_format_number.q,udf_map.q,udf_map_keys.q,udf_map_values.q,udf_max.q,udf_min.q,udf_named_struct.q,udf_percentile.q,udf_printf.q,udf_sentences.q,udf_sort_array.q,udf_split.q,udf_struct.q,udf_substr.q,udf_translate.q,udf_union.q,udf_xpath.q,udtf_stack.q,view.q,virtual_column.q @@ -107,7 +109,6 @@ test - @@ -260,6 +261,11 @@ test + commons-logging + commons-logging + ${commons-logging.version} + + org.apache.hadoop hadoop-yarn-server-tests ${hadoop-23.version} @@ -318,6 +324,48 @@ tests test + + org.apache.tez + tez-tests + ${tez.version} + test-jar + + + org.apache.tez + tez-api + ${tez.version} + test + + + org.apache.tez + tez-runtime-library + ${tez.version} + test + + + org.apache.tez + tez-mapreduce + ${tez.version} + test + + + org.apache.tez + tez-dag + ${tez.version} + test + + + org.apache.tez + tez-common + ${tez.version} + test + + + org.apache.tez + tez-runtime-internals + ${tez.version} + test + @@ -334,6 
+382,7 @@ + + + + + + + + + + qSortSet; private static final String SORT_SUFFIX = ".sorted"; public static final HashSet srcTables = new HashSet(); + private static MiniClusterType clusterType = MiniClusterType.none; private ParseDriver pd; private Hive db; protected HiveConf conf; @@ -208,7 +212,7 @@ } public QTestUtil(String outDir, String logDir) throws Exception { - this(outDir, logDir, false, "0.20"); + this(outDir, logDir, MiniClusterType.none, null, "0.20"); } public String getOutputDirectory() { @@ -242,9 +246,8 @@ conf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL, "org.apache.hadoop.hive.metastore.VerifyingObjectStore"); - if (miniMr) { + if (mr != null) { assert dfs != null; - assert mr != null; mr.setupConfiguration(conf); @@ -303,21 +306,66 @@ return uriStr; } - public QTestUtil(String outDir, String logDir, boolean miniMr, String hadoopVer) + public enum MiniClusterType { + mr, + tez, + none; + + public static MiniClusterType valueForString(String type) { + if (type.equals("miniMR")) { + return mr; + } else if (type.equals("tez")) { + return tez; + } else { + return none; + } + } + } + + public QTestUtil(String outDir, String logDir, MiniClusterType clusterType, String hadoopVer) throws Exception { + this(outDir, logDir, clusterType, null, hadoopVer); + } + + public QTestUtil(String outDir, String logDir, MiniClusterType clusterType, + String confDir, String hadoopVer) + throws Exception { this.outDir = outDir; this.logDir = logDir; + if (confDir != null && !confDir.isEmpty()) { + HiveConf.setHiveSiteLocation(new URL("file://"+confDir+"/hive-site.xml")); + System.out.println("Setting hive-site: "+HiveConf.getHiveSiteLocation()); + } conf = new HiveConf(Driver.class); - this.miniMr = miniMr; + this.miniMr = (clusterType == MiniClusterType.mr); this.hadoopVer = getHadoopMainVersion(hadoopVer); qMap = new TreeMap(); qSkipSet = new HashSet(); qSortSet = new HashSet(); + this.clusterType = clusterType; - if (miniMr) { - dfs = ShimLoader.getHadoopShims().getMiniDfs(conf, 4, true, null); + HadoopShims shims = ShimLoader.getHadoopShims(); + int numberOfDataNodes = 4; + + // can run tez tests only on hadoop 2 + if (clusterType == MiniClusterType.tez) { + Assume.assumeTrue(ShimLoader.getMajorVersion().equals("0.23")); + // this is necessary temporarily - there's a probem with multi datanodes on MiniTezCluster + // will be fixed in 0.3 + numberOfDataNodes = 1; + } + + if (clusterType != MiniClusterType.none) { + dfs = shims.getMiniDfs(conf, numberOfDataNodes, true, null); FileSystem fs = dfs.getFileSystem(); - mr = ShimLoader.getHadoopShims().getMiniMrCluster(conf, 4, getHdfsUriString(fs.getUri().toString()), 1); + if (clusterType == MiniClusterType.tez) { + if (!(shims instanceof Hadoop23Shims)) { + throw new Exception("Cannot run tez on hadoop-1, Version: "+this.hadoopVer); + } + mr = ((Hadoop23Shims)shims).getMiniTezCluster(conf, 4, getHdfsUriString(fs.getUri().toString()), 1); + } else { + mr = shims.getMiniMrCluster(conf, 4, getHdfsUriString(fs.getUri().toString()), 1); + } } initConf(); @@ -767,6 +815,11 @@ ss.err = new CachingPrintStream(fo, true, "UTF-8"); ss.setIsSilent(true); SessionState oldSs = SessionState.get(); + + if (oldSs != null && clusterType == MiniClusterType.tez) { + oldSs.close(); + } + if (oldSs != null && oldSs.out != null && oldSs.out != System.out) { oldSs.out.close(); } @@ -1446,7 +1499,7 @@ { QTestUtil[] qt = new QTestUtil[qfiles.length]; for (int i = 0; i < qfiles.length; i++) { - qt[i] = new QTestUtil(resDir, logDir, false, "0.20"); + qt[i] = new 
QTestUtil(resDir, logDir, MiniClusterType.none, null, "0.20"); qt[i].addFile(qfiles[i]); qt[i].clearTestSideEffects(); } Index: pom.xml =================================================================== --- pom.xml (revision 1553449) +++ pom.xml (working copy) @@ -91,6 +91,7 @@ 3.0.1 2.4 2.4 + 3.1 1.1.3 10.4.2.0 11.0.2 @@ -131,6 +132,7 @@ 1.0.1 1.7.5 4.0.4 + 0.2.0 1.1 0.2 1.4 Index: ql/pom.xml =================================================================== --- ql/pom.xml (revision 1553449) +++ ql/pom.xml (working copy) @@ -82,6 +82,11 @@ ${commons-io.version} + org.apache.commons + commons-lang3 + ${commons-lang3.version} + + commons-lang commons-lang ${commons-lang.version} @@ -210,6 +215,78 @@ ${mockito-all.version} test + + org.apache.tez + tez-api + ${tez.version} + true + + + org.apache.hadoop + hadoop-common + + + org.apache.hadoop + hadoop-mapreduce-client-core + + + org.apache.hadoop + hadoop-hdfs + + + org.apache.hadoop + hadoop-yarn-client + + + + + org.apache.tez + tez-runtime-library + ${tez.version} + true + + + org.apache.hadoop + hadoop-common + + + org.apache.hadoop + hadoop-mapreduce-client-core + + + org.apache.hadoop + hadoop-hdfs + + + org.apache.hadoop + hadoop-yarn-client + + + + + org.apache.tez + tez-mapreduce + ${tez.version} + true + + + org.apache.hadoop + hadoop-common + + + org.apache.hadoop + hadoop-mapreduce-client-core + + + org.apache.hadoop + hadoop-hdfs + + + org.apache.hadoop + hadoop-yarn-client + + + @@ -239,6 +316,29 @@ ${hadoop-23.version} true + + org.apache.hadoop + hadoop-hdfs + ${hadoop-23.version} + + + org.apache.hadoop + hadoop-yarn-api + ${hadoop-23.version} + true + + + org.apache.hadoop + hadoop-yarn-common + ${hadoop-23.version} + true + + + org.apache.hadoop + hadoop-yarn-client + ${hadoop-23.version} + true + Index: ql/src/java/org/apache/hadoop/hive/ql/Driver.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/Driver.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java (working copy) @@ -1212,7 +1212,8 @@ } - int jobs = Utilities.getMRTasks(plan.getRootTasks()).size(); + int jobs = Utilities.getMRTasks(plan.getRootTasks()).size() + + Utilities.getTezTasks(plan.getRootTasks()).size(); if (jobs > 0) { console.printInfo("Total MapReduce jobs = " + jobs); } Index: ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java (working copy) @@ -366,6 +366,11 @@ UNSUPPORTED_SUBQUERY_EXPRESSION(10249, "Unsupported SubQuery Expression"), INVALID_SUBQUERY_EXPRESSION(10250, "Invalid SubQuery expression"), + INVALID_HDFS_URI(10251, "{0} is not a hdfs uri", true), + INVALID_DIR(10252, "{0} is not a directory", true), + NO_VALID_LOCATIONS(10253, "Could not find any valid location to place the jars. " + + "Please update hive.jar.directory or hive.user.install.directory with a valid location", false), + SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."), SCRIPT_IO_ERROR(20001, "An error occurred while reading or writing to your custom script. 
" + "It may have crashed with an error."), Index: ql/src/java/org/apache/hadoop/hive/ql/HashTableLoaderFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/HashTableLoaderFactory.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/HashTableLoaderFactory.java (working copy) @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.ql.exec.HashTableLoader; + +/** + * HashTableLoaderFactory is used to determine the strategy + * of loading the hashtables for the MapJoinOperator + */ +public class HashTableLoaderFactory { + + private HashTableLoaderFactory() { + } + + public static HashTableLoader getLoader(Configuration hconf) { + if (HiveConf.getBoolVar(hconf, ConfVars.HIVE_OPTIMIZE_TEZ)) { + return new org.apache.hadoop.hive.ql.exec.tez.HashTableLoader(); + } else { + return new org.apache.hadoop.hive.ql.exec.mr.HashTableLoader(); + } + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java (working copy) @@ -149,4 +149,11 @@ protected boolean hasAnyNulls(MapJoinKey key) { return key.hasAnyNulls(nullsafes); } + + @Override + public void closeOp(boolean abort) throws HiveException { + super.closeOp(abort); + emptyList = null; + joinKeys = null; + } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableLoader.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableLoader.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableLoader.java (working copy) @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + limitations under the License. + */ +package org.apache.hadoop.hive.ql.exec; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainerSerDe; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.MapJoinDesc; + +/** + * HashTableLoader is an interface used by MapJoinOperator to load the hashtables + * needed to process the join. + */ +public interface HashTableLoader { + + void load(ExecMapperContext context, Configuration hconf, MapJoinDesc desc, byte posBigTable, + MapJoinTableContainer[] mapJoinTables, MapJoinTableContainerSerDe[] mapJoinTableSerdes) + throws HiveException; +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java (working copy) @@ -18,9 +18,6 @@ package org.apache.hadoop.hive.ql.exec; -import java.io.BufferedInputStream; -import java.io.FileInputStream; -import java.io.ObjectInputStream; import java.io.Serializable; import java.util.List; @@ -27,14 +24,13 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.filecache.DistributedCache; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.HashTableLoaderFactory; import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey; import org.apache.hadoop.hive.ql.exec.persistence.MapJoinObjectSerDeContext; import org.apache.hadoop.hive.ql.exec.persistence.MapJoinRowContainer; import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainerSerDe; +import org.apache.hadoop.hive.ql.log.PerfLogger; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.MapJoinDesc; import org.apache.hadoop.hive.ql.plan.TableDesc; @@ -41,7 +37,6 @@ import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.serde2.SerDe; import org.apache.hadoop.hive.serde2.SerDeException; -import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.util.ReflectionUtils; /** @@ -51,7 +46,15 @@ private static final long serialVersionUID = 1L; private static final Log LOG = LogFactory.getLog(MapJoinOperator.class.getName()); + private static final String CLASS_NAME = MapJoinOperator.class.getName(); + private final PerfLogger perfLogger = PerfLogger.getPerfLogger(); + private transient String tableKey; + private transient String serdeKey; + private transient ObjectCache cache; + + private HashTableLoader loader; + protected transient MapJoinTableContainer[] mapJoinTables; private transient MapJoinTableContainerSerDe[] mapJoinTableSerdes; private transient boolean hashTblInitedOnce; @@ -64,13 +67,41 @@ super(mjop); } + /* + * We need the base (operator.java) implementation of start/endGroup. + * The parent class has functionality in those that map join can't use. 
+ */ @Override + public void endGroup() throws HiveException { + defaultEndGroup(); + } + + @Override + public void startGroup() throws HiveException { + defaultStartGroup(); + } + + @Override protected void initializeOp(Configuration hconf) throws HiveException { super.initializeOp(hconf); + int tagLen = conf.getTagLength(); - mapJoinTables = new MapJoinTableContainer[tagLen]; - mapJoinTableSerdes = new MapJoinTableContainerSerDe[tagLen]; - hashTblInitedOnce = false; + + tableKey = "__HASH_MAP_"+this.getOperatorId()+"_container"; + serdeKey = "__HASH_MAP_"+this.getOperatorId()+"_serde"; + + cache = ObjectCacheFactory.getCache(hconf); + loader = HashTableLoaderFactory.getLoader(hconf); + + mapJoinTables = (MapJoinTableContainer[]) cache.retrieve(tableKey); + mapJoinTableSerdes = (MapJoinTableContainerSerDe[]) cache.retrieve(serdeKey); + hashTblInitedOnce = true; + + if (mapJoinTables == null || mapJoinTableSerdes == null) { + mapJoinTables = new MapJoinTableContainer[tagLen]; + mapJoinTableSerdes = new MapJoinTableContainerSerDe[tagLen]; + hashTblInitedOnce = false; + } } public void generateMapMetaData() throws HiveException, SerDeException { @@ -101,7 +132,9 @@ } private void loadHashTable() throws HiveException { - if (!this.getExecContext().getLocalWork().getInputFileChangeSensitive()) { + + if (this.getExecContext().getLocalWork() == null + || !this.getExecContext().getLocalWork().getInputFileChangeSensitive()) { if (hashTblInitedOnce) { return; } else { @@ -108,51 +141,12 @@ hashTblInitedOnce = true; } } - - String baseDir = null; - String currentInputFile = getExecContext().getCurrentInputPath().toString(); - LOG.info("******* Load from HashTable File: input : " + currentInputFile); - String fileName = getExecContext().getLocalWork().getBucketFileName(currentInputFile); - try { - if (ShimLoader.getHadoopShims().isLocalMode(hconf)) { - baseDir = this.getExecContext().getLocalWork().getTmpFileURI(); - } else { - Path[] localArchives; - String stageID = this.getExecContext().getLocalWork().getStageID(); - String suffix = Utilities.generateTarFileName(stageID); - FileSystem localFs = FileSystem.getLocal(hconf); - localArchives = DistributedCache.getLocalCacheArchives(this.hconf); - Path archive; - for (int j = 0; j < localArchives.length; j++) { - archive = localArchives[j]; - if (!archive.getName().endsWith(suffix)) { - continue; - } - Path archiveLocalLink = archive.makeQualified(localFs); - baseDir = archiveLocalLink.toUri().getPath(); - } - } - for (int pos = 0; pos < mapJoinTables.length; pos++) { - if (pos == posBigTable) { - continue; - } - if(baseDir == null) { - throw new IllegalStateException("baseDir cannot be null"); - } - String filePath = Utilities.generatePath(baseDir, conf.getDumpFilePrefix(), (byte)pos, fileName); - Path path = new Path(filePath); - LOG.info("\tLoad back 1 hashtable file from tmp file uri:" + path); - ObjectInputStream in = new ObjectInputStream(new BufferedInputStream( - new FileInputStream(path.toUri().getPath()), 4096)); - try{ - mapJoinTables[pos] = mapJoinTableSerdes[pos].load(in); - } finally { - in.close(); - } - } - } catch (Exception e) { - throw new HiveException(e); - } + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.LOAD_HASHTABLE); + loader.load(this.getExecContext(), hconf, this.getConf(), + posBigTable, mapJoinTables, mapJoinTableSerdes); + cache.cache(tableKey, mapJoinTables); + cache.cache(serdeKey, mapJoinTableSerdes); + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.LOAD_HASHTABLE); } // Load the hash table @@ -179,8 +173,8 @@ public 
void processOp(Object row, int tag) throws HiveException { try { if (firstRow) { - // generate the map metadata generateMapMetaData(); + loadHashTable(); firstRow = false; } alias = (byte)tag; @@ -227,7 +221,9 @@ @Override public void closeOp(boolean abort) throws HiveException { - if (mapJoinTables != null) { + if ((this.getExecContext().getLocalWork() != null + && this.getExecContext().getLocalWork().getInputFileChangeSensitive()) + && mapJoinTables != null) { for (MapJoinTableContainer tableContainer : mapJoinTables) { if (tableContainer != null) { tableContainer.clear(); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java (working copy) @@ -33,6 +33,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.ql.io.IOContext; import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.VirtualColumn; @@ -326,8 +327,7 @@ public void setChildren(Configuration hconf) throws HiveException { - Path fpath = new Path(HiveConf.getVar(hconf, - HiveConf.ConfVars.HADOOPMAPFILENAME)); + Path fpath = IOContext.get().getInputPath(); boolean schemeless = fpath.toUri().getScheme() == null; @@ -350,8 +350,10 @@ for (String onealias : aliases) { Operator op = conf.getAliasToWork().get(onealias); - LOG.info("Adding alias " + onealias + " to work list for file " - + onefile); + if (LOG.isDebugEnabled()) { + LOG.debug("Adding alias " + onealias + " to work list for file " + + onefile); + } MapInputPath inp = new MapInputPath(onefile, onealias, op, partDesc); if (opCtxMap.containsKey(inp)) { continue; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/MapredContext.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/MapredContext.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapredContext.java (working copy) @@ -26,6 +26,9 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.ql.exec.tez.TezContext; import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF; @@ -45,7 +48,9 @@ } public static MapredContext init(boolean isMap, JobConf jobConf) { - MapredContext context = new MapredContext(isMap, jobConf); + MapredContext context = + HiveConf.getBoolVar(jobConf, ConfVars.HIVE_OPTIMIZE_TEZ) ? 
+ new TezContext(isMap, jobConf) : new MapredContext(isMap, jobConf); contexts.set(context); return context; } @@ -64,7 +69,7 @@ private Reporter reporter; - private MapredContext(boolean isMap, JobConf jobConf) { + protected MapredContext(boolean isMap, JobConf jobConf) { this.isMap = isMap; this.jobConf = jobConf; this.udfs = new ArrayList(); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java (working copy) @@ -215,7 +215,7 @@ protected void initializeChildren(Configuration hconf) throws HiveException { state = State.INIT; LOG.info("Operator " + id + " " + getName() + " initialized"); - if (childOperators == null) { + if (childOperators == null || childOperators.isEmpty()) { return; } LOG.info("Initializing children of " + id + " " + getName()); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ObjectCache.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ObjectCache.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ObjectCache.java (working copy) @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.exec; + +/** + * ObjectCache. Interface for maintaining objects associated with a task. + */ +public interface ObjectCache { + /** + * Add an object to the cache + * @param key + * @param value + */ + public void cache(String key, Object value); + + /** + * Retrieve object from cache. + * @param key + * @return the last cached object with the key, null if none. + */ + public Object retrieve(String key); +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ObjectCacheFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ObjectCacheFactory.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ObjectCacheFactory.java (working copy) @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; + +/** + * ObjectCacheFactory returns the appropriate cache depending on settings in + * the hive conf. + */ +public class ObjectCacheFactory { + + private ObjectCacheFactory() { + // avoid instantiation + } + + /** + * Returns the appropriate cache + */ + public static ObjectCache getCache(Configuration conf) { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_OPTIMIZE_TEZ)) { + return new org.apache.hadoop.hive.ql.exec.tez.ObjectCache(); + } else { + return new org.apache.hadoop.hive.ql.exec.mr.ObjectCache(); + } + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java (working copy) @@ -32,6 +32,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -98,6 +99,8 @@ public Operator() { id = String.valueOf(seqId.getAndIncrement()); + childOperators = new ArrayList>(); + parentOperators = new ArrayList>(); initOperatorId(); } @@ -118,6 +121,9 @@ public void setChildOperators( List> childOperators) { + if (childOperators == null) { + childOperators = new ArrayList>(); + } this.childOperators = childOperators; } @@ -151,6 +157,9 @@ public void setParentOperators( List> parentOperators) { + if (parentOperators == null) { + parentOperators = new ArrayList>(); + } this.parentOperators = parentOperators; } @@ -333,7 +342,7 @@ // initialize structure to maintain child op info. 
operator tree changes // while // initializing so this need to be done here instead of initialize() method - if (childOperators != null) { + if (childOperators != null && !childOperators.isEmpty()) { childOperatorsArray = new Operator[childOperators.size()]; for (int i = 0; i < childOperatorsArray.length; i++) { childOperatorsArray[i] = childOperators.get(i); @@ -364,6 +373,14 @@ passExecContext(this.execContext); initializeOp(hconf); + + // sanity check + if (childOperatorsArray == null + && !(childOperators == null || childOperators.isEmpty())) { + throw new HiveException( + "Internal Hive error during operator initialization."); + } + LOG.info("Initialization Done " + id + " " + getName()); } @@ -390,7 +407,7 @@ protected void initializeChildren(Configuration hconf) throws HiveException { state = State.INIT; LOG.info("Operator " + id + " " + getName() + " initialized"); - if (childOperators == null) { + if (childOperators == null || childOperators.isEmpty()) { return; } LOG.info("Initializing children of " + id + " " + getName()); @@ -466,8 +483,7 @@ */ public abstract void processOp(Object row, int tag) throws HiveException; - // If a operator wants to do some work at the beginning of a group - public void startGroup() throws HiveException { + protected final void defaultStartGroup() throws HiveException { LOG.debug("Starting group"); if (childOperators == null) { @@ -482,8 +498,7 @@ LOG.debug("Start group Done"); } - // If an operator wants to do some work at the end of a group - public void endGroup() throws HiveException { + protected final void defaultEndGroup() throws HiveException { LOG.debug("Ending group"); if (childOperators == null) { @@ -498,6 +513,16 @@ LOG.debug("End group Done"); } + // If a operator wants to do some work at the beginning of a group + public void startGroup() throws HiveException { + defaultStartGroup(); + } + + // If an operator wants to do some work at the end of a group + public void endGroup() throws HiveException { + defaultEndGroup(); + } + // an blocking operator (e.g. 
GroupByOperator and JoinOperator) can // override this method to forward its outputs public void flush() throws HiveException { @@ -504,7 +529,7 @@ } public void processGroup(int tag) throws HiveException { - if (childOperators == null) { + if (childOperators == null || childOperators.isEmpty()) { return; } for (int i = 0; i < childOperatorsArray.length; i++) { @@ -548,6 +573,8 @@ // call the operator specific close routine closeOp(abort); + reporter = null; + try { logStats(); if (childOperators == null) { @@ -632,7 +659,7 @@ int childIndex = childOperators.indexOf(child); assert childIndex != -1; if (childOperators.size() == 1) { - childOperators = null; + setChildOperators(null); } else { childOperators.remove(childIndex); } @@ -681,7 +708,7 @@ int parentIndex = parentOperators.indexOf(parent); assert parentIndex != -1; if (parentOperators.size() == 1) { - parentOperators = null; + setParentOperators(null); } else { parentOperators.remove(parentIndex); } @@ -701,7 +728,7 @@ Operator currOp = this; for (int i = 0; i < depth; i++) { // If there are more than 1 children at any level, don't do anything - if ((currOp.getChildOperators() == null) || + if ((currOp.getChildOperators() == null) || (currOp.getChildOperators().isEmpty()) || (currOp.getChildOperators().size() > 1)) { return false; } @@ -750,11 +777,6 @@ protected void forward(Object row, ObjectInspector rowInspector) throws HiveException { - if (childOperatorsArray == null && childOperators != null) { - throw new HiveException( - "Internal Hive error during operator initialization."); - } - if ((childOperatorsArray == null) || (getDone())) { return; } @@ -1223,8 +1245,13 @@ } public void setStatistics(Statistics stats) { + if (LOG.isDebugEnabled()) { + LOG.debug("Setting stats ("+stats+") on "+this); + } if (conf != null) { conf.setStatistics(stats); + } else { + LOG.warn("Cannot set stats when there's no descriptor: "+this); } } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java (working copy) @@ -21,13 +21,18 @@ import java.util.Collection; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.mapred.OutputCollector; public class OperatorUtils { + private static final Log LOG = LogFactory.getLog(OperatorUtils.class); + public static Set findOperators(Operator start, Class clazz) { return findOperators(start, clazz, new HashSet()); } @@ -63,7 +68,7 @@ return; } for (Operator op : childOperators) { - if(op.getName().equals(ReduceSinkOperator.getOperatorName())) { //TODO: + if(op.getName().equals(ReduceSinkOperator.getOperatorName())) { ((ReduceSinkOperator)op).setOutputCollector(out); } else { setChildrenCollector(op.getChildOperators(), out); @@ -70,4 +75,22 @@ } } } + + public static void setChildrenCollector(List> childOperators, Map outMap) { + if (childOperators == null) { + return; + } + for (Operator op : childOperators) { + if(op.getName().equals(ReduceSinkOperator.getOperatorName())) { + ReduceSinkOperator rs = ((ReduceSinkOperator)op); + if (outMap.containsKey(rs.getConf().getOutputName())) { + LOG.info("Setting output collector: " + rs + " --> " + + rs.getConf().getOutputName()); + 
rs.setOutputCollector(outMap.get(rs.getConf().getOutputName())); + } + } else { + setChildrenCollector(op.getChildOperators(), outMap); + } + } + } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java (working copy) @@ -504,10 +504,16 @@ Throwable getException() { return exception; } + void setException(Throwable ex) { exception = ex; } + public void setConsole(LogHelper console) { + this.console = console; + } + + @Override public String toString() { return getId() + ":" + getType(); } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java (working copy) @@ -25,6 +25,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; import org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask; +import org.apache.hadoop.hive.ql.exec.tez.TezTask; import org.apache.hadoop.hive.ql.io.rcfile.merge.BlockMergeTask; import org.apache.hadoop.hive.ql.io.rcfile.merge.MergeWork; import org.apache.hadoop.hive.ql.io.rcfile.stats.PartialScanTask; @@ -41,6 +42,7 @@ import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.StatsWork; +import org.apache.hadoop.hive.ql.plan.TezWork; /** * TaskFactory implementation. @@ -89,6 +91,7 @@ DependencyCollectionTask.class)); taskvec.add(new taskTuple(PartialScanWork.class, PartialScanTask.class)); + taskvec.add(new taskTuple(TezWork.class, TezTask.class)); } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (working copy) @@ -88,7 +88,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.filecache.DistributedCache; import org.apache.hadoop.fs.ContentSummary; -import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -97,6 +96,7 @@ import org.apache.hadoop.hive.common.HiveInterruptUtils; import org.apache.hadoop.hive.common.HiveStatsUtils; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; @@ -108,6 +108,7 @@ import org.apache.hadoop.hive.ql.exec.mr.ExecMapper; import org.apache.hadoop.hive.ql.exec.mr.ExecReducer; import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; +import org.apache.hadoop.hive.ql.exec.tez.TezTask; import org.apache.hadoop.hive.ql.io.ContentSummaryInputFormat; import org.apache.hadoop.hive.ql.io.FSRecordWriter; import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; @@ -131,9 +132,12 @@ import org.apache.hadoop.hive.ql.metadata.InputEstimator; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner; +import 
org.apache.hadoop.hive.ql.parse.PrunedPartitionList; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.BaseWork; import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.ql.plan.FileSinkDesc; import org.apache.hadoop.hive.ql.plan.GroupByDesc; @@ -177,6 +181,8 @@ import com.esotericsoftware.kryo.io.Output; import com.esotericsoftware.kryo.serializers.FieldSerializer; +import org.apache.commons.codec.binary.Base64; + /** * Utilities. * @@ -259,15 +265,31 @@ return w; } + public static void setMapWork(Configuration conf, MapWork work) { + setBaseWork(conf, MAP_PLAN_NAME, work); + } + public static MapWork getMapWork(Configuration conf) { return (MapWork) getBaseWork(conf, MAP_PLAN_NAME); } + public static void setReduceWork(Configuration conf, ReduceWork work) { + setBaseWork(conf, REDUCE_PLAN_NAME, work); + } + public static ReduceWork getReduceWork(Configuration conf) { return (ReduceWork) getBaseWork(conf, REDUCE_PLAN_NAME); } /** + * Pushes work into the global work map + */ + public static void setBaseWork(Configuration conf, String name, BaseWork work) { + Path path = getPlanPath(conf, name); + gWorkMap.put(path, work); + } + + /** * Returns the Map or Reduce plan * Side effect: the BaseWork returned is also placed in the gWorkMap * @param conf @@ -282,8 +304,7 @@ try { path = getPlanPath(conf, name); assert path != null; - gWork = gWorkMap.get(path); - if (gWork == null) { + if (!gWorkMap.containsKey(path)) { Path localPath; if (ShimLoader.getHadoopShims().isLocalMode(conf)) { localPath = path; @@ -290,7 +311,20 @@ } else { localPath = new Path(name); } - in = new FileInputStream(localPath.toUri().getPath()); + + if (HiveConf.getBoolVar(conf, ConfVars.HIVE_RPC_QUERY_PLAN)) { + LOG.debug("Loading plan from: "+path.toUri().getPath()); + String planString = conf.get(path.toUri().getPath()); + if (planString == null) { + LOG.debug("Could not find plan string in conf"); + return null; + } + byte[] planBytes = Base64.decodeBase64(planString); + in = new ByteArrayInputStream(planBytes); + } else { + in = new FileInputStream(localPath.toUri().getPath()); + } + if(MAP_PLAN_NAME.equals(name)){ if (ExecMapper.class.getName().equals(conf.get(MAPRED_MAPPER_CLASS))){ gWork = deserializePlan(in, MapWork.class, conf); @@ -313,6 +347,9 @@ } } gWorkMap.put(path, gWork); + } else { + LOG.debug("Found plan in cache."); + gWork = gWorkMap.get(path); } return gWork; } catch (FileNotFoundException fnf) { @@ -535,26 +572,37 @@ Path planPath = getPlanPath(conf, name); - // use the default file system of the conf - FileSystem fs = planPath.getFileSystem(conf); - FSDataOutputStream out = fs.create(planPath); - serializePlan(w, out, conf); + OutputStream out; - // Serialize the plan to the default hdfs instance - // Except for hadoop local mode execution where we should be - // able to get the plan directly from the cache - if (useCache && !ShimLoader.getHadoopShims().isLocalMode(conf)) { - // Set up distributed cache - if (!DistributedCache.getSymlink(conf)) { - DistributedCache.createSymlink(conf); + if (HiveConf.getBoolVar(conf, ConfVars.HIVE_RPC_QUERY_PLAN)) { + // add it to the conf + out = new ByteArrayOutputStream(); + serializePlan(w, out, conf); + LOG.info("Setting plan: "+planPath.toUri().getPath()); + conf.set(planPath.toUri().getPath(), + Base64.encodeBase64String(((ByteArrayOutputStream)out).toByteArray())); + 
} else { + // use the default file system of the conf + FileSystem fs = planPath.getFileSystem(conf); + out = fs.create(planPath); + serializePlan(w, out, conf); + + // Serialize the plan to the default hdfs instance + // Except for hadoop local mode execution where we should be + // able to get the plan directly from the cache + if (useCache && !ShimLoader.getHadoopShims().isLocalMode(conf)) { + // Set up distributed cache + if (!DistributedCache.getSymlink(conf)) { + DistributedCache.createSymlink(conf); + } + String uriWithLink = planPath.toUri().toString() + "#" + name; + DistributedCache.addCacheFile(new URI(uriWithLink), conf); + + // set replication of the plan file to a high number. we use the same + // replication factor as used by the hadoop jobclient for job.xml etc. + short replication = (short) conf.getInt("mapred.submit.replication", 10); + fs.setReplication(planPath, replication); } - String uriWithLink = planPath.toUri().toString() + "#" + name; - DistributedCache.addCacheFile(new URI(uriWithLink), conf); - - // set replication of the plan file to a high number. we use the same - // replication factor as used by the hadoop jobclient for job.xml etc. - short replication = (short) conf.getInt("mapred.submit.replication", 10); - fs.setReplication(planPath, replication); } // Cache the plan in this process @@ -2200,6 +2248,26 @@ return true; } + public static List getTezTasks(List> tasks) { + List tezTasks = new ArrayList(); + if (tasks != null) { + getTezTasks(tasks, tezTasks); + } + return tezTasks; + } + + private static void getTezTasks(List> tasks, List tezTasks) { + for (Task task : tasks) { + if (task instanceof TezTask && !tezTasks.contains((TezTask) task)) { + tezTasks.add((TezTask) task); + } + + if (task.getDependentTasks() != null) { + getTezTasks(task.getDependentTasks(), tezTasks); + } + } + } + public static List getMRTasks(List> tasks) { List mrTasks = new ArrayList(); if (tasks != null) { @@ -2705,22 +2773,30 @@ + maxReducers + " totalInputFileSize=" + totalInputFileSize); } + // If this map reduce job writes final data to a table and bucketing is being inferred, + // and the user has configured Hive to do this, make sure the number of reducers is a + // power of two + boolean powersOfTwo = conf.getBoolVar(HiveConf.ConfVars.HIVE_INFER_BUCKET_SORT_NUM_BUCKETS_POWER_TWO) && + finalMapRed && !work.getBucketedColsByDirectory().isEmpty(); + + return estimateReducers(totalInputFileSize, bytesPerReducer, maxReducers, powersOfTwo); + } + + public static int estimateReducers(long totalInputFileSize, long bytesPerReducer, + int maxReducers, boolean powersOfTwo) { + int reducers = (int) ((totalInputFileSize + bytesPerReducer - 1) / bytesPerReducer); reducers = Math.max(1, reducers); reducers = Math.min(maxReducers, reducers); - // If this map reduce job writes final data to a table and bucketing is being inferred, - // and the user has configured Hive to do this, make sure the number of reducers is a - // power of two - if (conf.getBoolVar(HiveConf.ConfVars.HIVE_INFER_BUCKET_SORT_NUM_BUCKETS_POWER_TWO) && - finalMapRed && !work.getBucketedColsByDirectory().isEmpty()) { - int reducersLog = (int)(Math.log(reducers) / Math.log(2)) + 1; - int reducersPowerTwo = (int)Math.pow(2, reducersLog); + int reducersLog = (int)(Math.log(reducers) / Math.log(2)) + 1; + int reducersPowerTwo = (int)Math.pow(2, reducersLog); + if (powersOfTwo) { // If the original number of reducers was a power of two, use that if (reducersPowerTwo / 2 == reducers) { - return reducers; + // nothing to do } 
else if (reducersPowerTwo > maxReducers) { // If the next power of two greater than the original number of reducers is greater // than the max number of reducers, use the preceding power of two, which is strictly @@ -2731,7 +2807,6 @@ reducers = reducersPowerTwo; } } - return reducers; } @@ -2843,7 +2918,7 @@ pathsProcessed.add(path); LOG.info("Adding input file " + path); - if (isEmptyPath(job, path, ctx)) { + if (!HiveConf.getBoolVar(job, ConfVars.HIVE_OPTIMIZE_TEZ) && isEmptyPath(job, path, ctx)) { path = createDummyFileForEmptyPartition(path, job, work, hiveScratchDir, alias, sequenceNumber++); @@ -2860,7 +2935,7 @@ // T2) x; // If T is empty and T2 contains 100 rows, the user expects: 0, 100 (2 // rows) - if (path == null) { + if (path == null && !HiveConf.getBoolVar(job, ConfVars.HIVE_OPTIMIZE_TEZ)) { path = createDummyFileForEmptyTable(job, work, hiveScratchDir, alias, sequenceNumber++); pathsToAdd.add(path); @@ -3088,6 +3163,10 @@ } } + public static void clearWorkMap() { + gWorkMap.clear(); + } + /** * Create a temp dir in specified baseDir * This can go away once hive moves to support only JDK 7 @@ -3114,4 +3193,3 @@ } } - Index: ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java (working copy) @@ -31,6 +31,8 @@ import org.apache.hadoop.hive.ql.exec.FetchOperator; import org.apache.hadoop.hive.ql.exec.MapOperator; import org.apache.hadoop.hive.ql.exec.MapredContext; +import org.apache.hadoop.hive.ql.exec.ObjectCache; +import org.apache.hadoop.hive.ql.exec.ObjectCacheFactory; import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.OperatorUtils; import org.apache.hadoop.hive.ql.exec.Utilities; @@ -59,6 +61,7 @@ */ public class ExecMapper extends MapReduceBase implements Mapper { + private static final String PLAN_KEY = "__MAP_PLAN__"; private MapOperator mo; private Map fetchOperators; private OutputCollector oc; @@ -94,11 +97,22 @@ } catch (Exception e) { l4j.info("cannot get classpath: " + e.getMessage()); } + + setDone(false); + + ObjectCache cache = ObjectCacheFactory.getCache(job); + try { jc = job; execContext.setJc(jc); // create map and fetch operators - MapWork mrwork = Utilities.getMapWork(job); + MapWork mrwork = (MapWork) cache.retrieve(PLAN_KEY); + if (mrwork == null) { + mrwork = Utilities.getMapWork(job); + cache.cache(PLAN_KEY, mrwork); + } else { + Utilities.setMapWork(job, mrwork); + } if (mrwork.getVectorMode()) { mo = new VectorMapOperator(); } else { @@ -247,6 +261,7 @@ } } finally { MapredContext.close(); + Utilities.clearWorkMap(); } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java (working copy) @@ -30,6 +30,8 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.MapredContext; +import org.apache.hadoop.hive.ql.exec.ObjectCache; +import org.apache.hadoop.hive.ql.exec.ObjectCacheFactory; import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.exec.mr.ExecMapper.reportStats; @@ -64,6 +66,8 @@ */ public class ExecReducer 
extends MapReduceBase implements Reducer { + private static final String PLAN_KEY = "__REDUCE_PLAN__"; + private JobConf jc; private OutputCollector oc; private Operator reducer; @@ -112,7 +116,16 @@ l4j.info("cannot get classpath: " + e.getMessage()); } jc = job; - ReduceWork gWork = Utilities.getReduceWork(job); + + ObjectCache cache = ObjectCacheFactory.getCache(jc); + ReduceWork gWork = (ReduceWork) cache.retrieve(PLAN_KEY); + if (gWork == null) { + gWork = Utilities.getReduceWork(job); + cache.cache(PLAN_KEY, gWork); + } else { + Utilities.setReduceWork(job, gWork); + } + reducer = gWork.getReducer(); reducer.setParentOperators(null); // clear out any parents as reducer is the // root @@ -314,6 +327,7 @@ } } finally { MapredContext.close(); + Utilities.clearWorkMap(); } } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java (working copy) @@ -128,10 +128,6 @@ this.jobId = jobId; } - - public HadoopJobExecHelper() { - } - public HadoopJobExecHelper(JobConf job, LogHelper console, Task task, HadoopJobExecHook hookCallBack) { this.job = job; @@ -138,6 +134,12 @@ this.console = console; this.task = task; this.callBackObj = hookCallBack; + + if (job != null) { + // even with tez on some jobs are run as MR. disable the flag in + // the conf, so that the backend runs fully as MR. + HiveConf.setBoolVar(job, HiveConf.ConfVars.HIVE_OPTIMIZE_TEZ, false); + } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HashTableLoader.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HashTableLoader.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HashTableLoader.java (working copy) @@ -0,0 +1,104 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.exec.mr; + +import java.io.BufferedInputStream; +import java.io.FileInputStream; +import java.io.ObjectInputStream; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.filecache.DistributedCache; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.exec.MapJoinOperator; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainerSerDe; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.MapJoinDesc; +import org.apache.hadoop.hive.shims.ShimLoader; + +/** + * HashTableLoader for MR loads the hashtable for MapJoins from local disk (hashtables + * are distributed by using the DistributedCache. + * + */ +public class HashTableLoader implements org.apache.hadoop.hive.ql.exec.HashTableLoader { + + private static final Log LOG = LogFactory.getLog(MapJoinOperator.class.getName()); + + public HashTableLoader() { + } + + @Override + public void load(ExecMapperContext context, + Configuration hconf, + MapJoinDesc desc, + byte posBigTable, + MapJoinTableContainer[] mapJoinTables, + MapJoinTableContainerSerDe[] mapJoinTableSerdes) throws HiveException { + + String baseDir = null; + Path currentInputPath = context.getCurrentInputPath(); + LOG.info("******* Load from HashTable File: input : " + currentInputPath); + String fileName = context.getLocalWork().getBucketFileName(currentInputPath.toString()); + try { + if (ShimLoader.getHadoopShims().isLocalMode(hconf)) { + baseDir = context.getLocalWork().getTmpFileURI(); + } else { + Path[] localArchives; + String stageID = context.getLocalWork().getStageID(); + String suffix = Utilities.generateTarFileName(stageID); + FileSystem localFs = FileSystem.getLocal(hconf); + localArchives = DistributedCache.getLocalCacheArchives(hconf); + Path archive; + for (int j = 0; j < localArchives.length; j++) { + archive = localArchives[j]; + if (!archive.getName().endsWith(suffix)) { + continue; + } + Path archiveLocalLink = archive.makeQualified(localFs); + baseDir = archiveLocalLink.toUri().getPath(); + } + } + for (int pos = 0; pos < mapJoinTables.length; pos++) { + if (pos == posBigTable) { + continue; + } + if(baseDir == null) { + throw new IllegalStateException("baseDir cannot be null"); + } + String filePath = Utilities.generatePath(baseDir, desc.getDumpFilePrefix(), (byte)pos, fileName); + Path path = new Path(filePath); + LOG.info("\tLoad back 1 hashtable file from tmp file uri:" + path); + ObjectInputStream in = new ObjectInputStream(new BufferedInputStream( + new FileInputStream(path.toUri().getPath()), 4096)); + try{ + mapJoinTables[pos] = mapJoinTableSerdes[pos].load(in); + } finally { + in.close(); + } + } + } catch (Exception e) { + throw new HiveException(e); + } + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java (working copy) @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.mr; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + + +/** + * ObjectCache. No-op implementation; on MR we don't have a means to reuse + * objects between runs of the same task. + * + */ +public class ObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCache { + + private static final Log LOG = LogFactory.getLog(ObjectCache.class.getName()); + + @Override + public void cache(String key, Object value) { + LOG.info("Ignoring cache key: "+key); + } + + @Override + public Object retrieve(String key) { + LOG.info("Ignoring retrieval request: "+key); + return null; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java (working copy) @@ -79,13 +79,19 @@ return false; return true; } + + public void read(MapJoinObjectSerDeContext context, ObjectInputStream in, Writable container) + throws IOException, SerDeException { + container.readFields(in); + read(context, container); + } + + @SuppressWarnings("unchecked") - public void read(MapJoinObjectSerDeContext context, ObjectInputStream in, Writable container) - throws IOException, SerDeException { + public void read(MapJoinObjectSerDeContext context, Writable container) throws SerDeException { SerDe serde = context.getSerDe(); - container.readFields(in); List value = (List)ObjectInspectorUtils.copyToStandardObject(serde.deserialize(container), serde.getObjectInspector(), ObjectInspectorCopyOption.WRITABLE); + if(value == null) { key = EMPTY_OBJECT_ARRAY; } else { @@ -92,8 +98,8 @@ key = value.toArray(); } } - - public void write(MapJoinObjectSerDeContext context, ObjectOutputStream out) + + public void write(MapJoinObjectSerDeContext context, ObjectOutputStream out) throws IOException, SerDeException { SerDe serde = context.getSerDe(); ObjectInspector objectInspector = context.getStandardOI(); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinRowContainer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinRowContainer.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinRowContainer.java (working copy) @@ -104,30 +104,34 @@ } return result; } - - @SuppressWarnings({"unchecked"}) - public void read(MapJoinObjectSerDeContext context, ObjectInputStream in, Writable container) + + public void read(MapJoinObjectSerDeContext context, ObjectInputStream in, Writable container) throws IOException, SerDeException { clear(); - SerDe serde = context.getSerDe(); long numRows =
in.readLong(); for (long rowIndex = 0L; rowIndex < numRows; rowIndex++) { - container.readFields(in); - List value = (List)ObjectInspectorUtils.copyToStandardObject(serde.deserialize(container), - serde.getObjectInspector(), ObjectInspectorCopyOption.WRITABLE); - if(value == null) { - add(toList(EMPTY_OBJECT_ARRAY)); - } else { - Object[] valuesArray = value.toArray(); - if (context.hasFilterTag()) { - aliasFilter &= ((ShortWritable)valuesArray[valuesArray.length - 1]).get(); - } - add(toList(valuesArray)); + container.readFields(in); + read(context, container); + } + } + + @SuppressWarnings("unchecked") + public void read(MapJoinObjectSerDeContext context, Writable currentValue) throws SerDeException { + SerDe serde = context.getSerDe(); + List value = (List)ObjectInspectorUtils.copyToStandardObject(serde.deserialize(currentValue), + serde.getObjectInspector(), ObjectInspectorCopyOption.WRITABLE); + if(value == null) { + add(toList(EMPTY_OBJECT_ARRAY)); + } else { + Object[] valuesArray = value.toArray(); + if (context.hasFilterTag()) { + aliasFilter &= ((ShortWritable)valuesArray[valuesArray.length - 1]).get(); } + add(toList(valuesArray)); } } - - public void write(MapJoinObjectSerDeContext context, ObjectOutputStream out) + + public void write(MapJoinObjectSerDeContext context, ObjectOutputStream out) throws IOException, SerDeException { SerDe serde = context.getSerDe(); ObjectInspector valueObjectInspector = context.getStandardOI(); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java (working copy) @@ -40,6 +40,14 @@ this.keyContext = keyContext; this.valueContext = valueContext; } + + public MapJoinObjectSerDeContext getKeyContext() { + return keyContext; + } + public MapJoinObjectSerDeContext getValueContext() { + return valueContext; + } + @SuppressWarnings({"unchecked"}) public MapJoinTableContainer load(ObjectInputStream in) throws HiveException { Index: ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java (working copy) @@ -0,0 +1,693 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.exec.tez; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.security.auth.login.LoginException; + +import org.apache.commons.io.FilenameUtils; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.exec.mr.ExecMapper; +import org.apache.hadoop.hive.ql.exec.mr.ExecReducer; +import org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; +import org.apache.hadoop.hive.ql.io.HiveInputFormat; +import org.apache.hadoop.hive.ql.io.HiveKey; +import org.apache.hadoop.hive.ql.io.HiveOutputFormatImpl; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.BaseWork; +import org.apache.hadoop.hive.ql.plan.MapWork; +import org.apache.hadoop.hive.ql.plan.ReduceWork; +import org.apache.hadoop.hive.ql.plan.TezWork.EdgeType; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.ql.stats.StatsFactory; +import org.apache.hadoop.hive.ql.stats.StatsPublisher; +import org.apache.hadoop.hive.shims.HadoopShimsSecure.NullOutputCommitter; +import org.apache.hadoop.hive.shims.ShimLoader; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.mapred.InputFormat; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.OutputFormat; +import org.apache.hadoop.mapred.split.TezGroupedSplitsInputFormat; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.api.records.LocalResourceType; +import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; +import org.apache.hadoop.yarn.api.records.URL; +import org.apache.hadoop.yarn.util.ConverterUtils; +import org.apache.hadoop.yarn.util.Records; +import org.apache.tez.dag.api.Edge; +import org.apache.tez.dag.api.EdgeProperty; +import org.apache.tez.dag.api.EdgeProperty.DataMovementType; +import org.apache.tez.dag.api.EdgeProperty.DataSourceType; +import org.apache.tez.dag.api.EdgeProperty.SchedulingType; +import org.apache.tez.dag.api.InputDescriptor; +import org.apache.tez.dag.api.OutputDescriptor; +import org.apache.tez.dag.api.ProcessorDescriptor; +import org.apache.tez.dag.api.Vertex; +import org.apache.tez.mapreduce.common.MRInputAMSplitGenerator; +import org.apache.tez.mapreduce.hadoop.InputSplitInfo; +import org.apache.tez.mapreduce.hadoop.MRHelpers; +import org.apache.tez.mapreduce.hadoop.MRJobConfig; +import org.apache.tez.mapreduce.hadoop.MultiStageMRConfToTezTranslator; +import org.apache.tez.mapreduce.input.MRInputLegacy; +import org.apache.tez.mapreduce.output.MROutput; +import org.apache.tez.mapreduce.partition.MRPartitioner; +import org.apache.tez.runtime.library.input.ShuffledMergedInputLegacy; +import org.apache.tez.runtime.library.input.ShuffledUnorderedKVInput; +import org.apache.tez.runtime.library.output.OnFileSortedOutput; +import 
org.apache.tez.runtime.library.output.OnFileUnorderedKVOutput; + +/** + * DagUtils. DagUtils is a collection of helper methods to convert + * map and reduce work to tez vertices and edges. It handles configuration + * objects, file localization and vertex/edge creation. + */ +public class DagUtils { + + private static final String TEZ_DIR = "_tez_scratch_dir"; + private static DagUtils instance; + + /* + * Creates the configuration object necessary to run a specific vertex from + * map work. This includes input formats, input processor, etc. + */ + private JobConf initializeVertexConf(JobConf baseConf, MapWork mapWork) { + JobConf conf = new JobConf(baseConf); + + if (mapWork.getNumMapTasks() != null) { + conf.setInt(MRJobConfig.NUM_MAPS, mapWork.getNumMapTasks().intValue()); + } + + if (mapWork.getMaxSplitSize() != null) { + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, + mapWork.getMaxSplitSize().longValue()); + } + + if (mapWork.getMinSplitSize() != null) { + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, + mapWork.getMinSplitSize().longValue()); + } + + if (mapWork.getMinSplitSizePerNode() != null) { + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZEPERNODE, + mapWork.getMinSplitSizePerNode().longValue()); + } + + if (mapWork.getMinSplitSizePerRack() != null) { + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZEPERRACK, + mapWork.getMinSplitSizePerRack().longValue()); + } + + Utilities.setInputAttributes(conf, mapWork); + + String inpFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEINPUTFORMAT); + if ((inpFormat == null) || (!StringUtils.isNotBlank(inpFormat))) { + inpFormat = ShimLoader.getHadoopShims().getInputFormatClassName(); + } + + if (mapWork.isUseBucketizedHiveInputFormat()) { + inpFormat = BucketizedHiveInputFormat.class.getName(); + } + + conf.set("mapred.mapper.class", ExecMapper.class.getName()); + conf.set("mapred.input.format.class", inpFormat); + + return conf; + } + + /** + * Given two vertices and their respective configuration objects createEdge + * will create an Edge object that connects the two. Currently the edge will + * always be a stable bi-partite edge. 
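+ *
+ * A minimal usage sketch (mapConf, mapVertex, reduceConf, reduceVertex and dag are
+ * illustrative names; the vertices are assumed to come from createVertex below):
+ *
+ *   DagUtils utils = DagUtils.getInstance();
+ *   Edge e = utils.createEdge(mapConf, mapVertex, reduceConf, reduceVertex, EdgeType.SIMPLE_EDGE);
+ *   dag.addEdge(e);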
+ * + * @param vConf JobConf of the first vertex + * @param v The first vertex (source) + * @param wConf JobConf of the second vertex + * @param w The second vertex (sink) + * @return Edge connecting the two vertices + */ + public Edge createEdge(JobConf vConf, Vertex v, JobConf wConf, Vertex w, + EdgeType edgeType) + throws IOException { + + // Tez needs the output/input pair of adjacent vertices to be set up consistently + MultiStageMRConfToTezTranslator.translateVertexConfToTez(wConf, vConf); + + // update payloads (configuration for the vertices might have changed) + v.getProcessorDescriptor().setUserPayload(MRHelpers.createUserPayloadFromConf(vConf)); + w.getProcessorDescriptor().setUserPayload(MRHelpers.createUserPayloadFromConf(wConf)); + + DataMovementType dataMovementType; + Class logicalInputClass; + Class logicalOutputClass; + + switch (edgeType) { + case BROADCAST_EDGE: + dataMovementType = DataMovementType.BROADCAST; + logicalOutputClass = OnFileUnorderedKVOutput.class; + logicalInputClass = ShuffledUnorderedKVInput.class; + break; + + case SIMPLE_EDGE: + default: + dataMovementType = DataMovementType.SCATTER_GATHER; + logicalOutputClass = OnFileSortedOutput.class; + logicalInputClass = ShuffledMergedInputLegacy.class; + break; + } + + EdgeProperty edgeProperty = + new EdgeProperty(dataMovementType, + DataSourceType.PERSISTED, + SchedulingType.SEQUENTIAL, + new OutputDescriptor(logicalOutputClass.getName()), + new InputDescriptor(logicalInputClass.getName())); + return new Edge(v, w, edgeProperty); + } + + /* + * Helper function to create Vertex from MapWork. + */ + private Vertex createVertex(JobConf conf, MapWork mapWork, + LocalResource appJarLr, List additionalLr, FileSystem fs, + Path mrScratchDir, Context ctx) throws Exception { + + Path tezDir = getTezDir(mrScratchDir); + + // set up the operator plan + Path planPath = Utilities.setMapWork(conf, mapWork, + mrScratchDir.toUri().toString(), false); + + // setup input paths and split info + List inputPaths = Utilities.getInputPaths(conf, mapWork, + mrScratchDir.toUri().toString(), ctx); + Utilities.setInputPaths(conf, inputPaths); + + // create the directories FileSinkOperators need + Utilities.createTmpDirs(conf, mapWork); + + // Tez asks us to call this even if there's no preceding vertex + MultiStageMRConfToTezTranslator.translateVertexConfToTez(conf, null); + + // finally create the vertex + Vertex map = null; + + // use tez to combine splits + boolean useTezGroupedSplits = false; + + int numTasks = -1; + Class amSplitGeneratorClass = null; + InputSplitInfo inputSplitInfo = null; + Class inputFormatClass = conf.getClass("mapred.input.format.class", + InputFormat.class); + + // we'll set up tez to combine splits for us iff the input format + // is HiveInputFormat + if (inputFormatClass == HiveInputFormat.class) { + useTezGroupedSplits = true; + conf.setClass("mapred.input.format.class", TezGroupedSplitsInputFormat.class, InputFormat.class); + } + + if (HiveConf.getBoolVar(conf, ConfVars.HIVE_AM_SPLIT_GENERATION)) { + // if we're generating the splits in the AM, we just need to set + // the correct plugin. + amSplitGeneratorClass = MRInputAMSplitGenerator.class; + } else { + // client side split generation means we have to compute them now + inputSplitInfo = MRHelpers.generateInputSplits(conf, + new Path(tezDir, "split_"+mapWork.getName().replaceAll(" ", "_"))); + numTasks = inputSplitInfo.getNumTasks(); + } + + byte[] serializedConf = MRHelpers.createUserPayloadFromConf(conf); + map = new Vertex(mapWork.getName(), + new ProcessorDescriptor(MapTezProcessor.class.getName()).
+ setUserPayload(serializedConf), numTasks, + MRHelpers.getMapResource(conf)); + Map environment = new HashMap(); + MRHelpers.updateEnvironmentForMRTasks(conf, environment, true); + map.setTaskEnvironment(environment); + map.setJavaOpts(MRHelpers.getMapJavaOpts(conf)); + + assert mapWork.getAliasToWork().keySet().size() == 1; + + String alias = mapWork.getAliasToWork().keySet().iterator().next(); + + byte[] mrInput = null; + if (useTezGroupedSplits) { + mrInput = MRHelpers.createMRInputPayloadWithGrouping(serializedConf, + null, HiveInputFormat.class.getName()); + } else { + mrInput = MRHelpers.createMRInputPayload(serializedConf, null); + } + map.addInput(alias, + new InputDescriptor(MRInputLegacy.class.getName()). + setUserPayload(mrInput), amSplitGeneratorClass); + + Map localResources = new HashMap(); + localResources.put(getBaseName(appJarLr), appJarLr); + for (LocalResource lr: additionalLr) { + localResources.put(getBaseName(lr), lr); + } + + if (inputSplitInfo != null) { + // only relevant for client-side split generation + map.setTaskLocationsHint(inputSplitInfo.getTaskLocationHints()); + MRHelpers.updateLocalResourcesForInputSplits(FileSystem.get(conf), inputSplitInfo, + localResources); + } + + map.setTaskLocalResources(localResources); + return map; + } + + /* + * Helper function to create JobConf for specific ReduceWork. + */ + private JobConf initializeVertexConf(JobConf baseConf, ReduceWork reduceWork) { + JobConf conf = new JobConf(baseConf); + + conf.set("mapred.reducer.class", ExecReducer.class.getName()); + + boolean useSpeculativeExecReducers = HiveConf.getBoolVar(conf, + HiveConf.ConfVars.HIVESPECULATIVEEXECREDUCERS); + HiveConf.setBoolVar(conf, HiveConf.ConfVars.HADOOPSPECULATIVEEXECREDUCERS, + useSpeculativeExecReducers); + + return conf; + } + + /* + * Helper function to create Vertex for given ReduceWork. + */ + private Vertex createVertex(JobConf conf, ReduceWork reduceWork, + LocalResource appJarLr, List additionalLr, FileSystem fs, + Path mrScratchDir, Context ctx) throws Exception { + + // set up operator plan + Path planPath = Utilities.setReduceWork(conf, reduceWork, + mrScratchDir.toUri().toString(), false); + + // create the directories FileSinkOperators need + Utilities.createTmpDirs(conf, reduceWork); + + // Call once here, will be updated when we find edges + MultiStageMRConfToTezTranslator.translateVertexConfToTez(conf, null); + + // create the vertex + Vertex reducer = new Vertex(reduceWork.getName(), + new ProcessorDescriptor(ReduceTezProcessor.class.getName()). + setUserPayload(MRHelpers.createUserPayloadFromConf(conf)), + reduceWork.getNumReduceTasks(), MRHelpers.getReduceResource(conf)); + + Map environment = new HashMap(); + + MRHelpers.updateEnvironmentForMRTasks(conf, environment, false); + reducer.setTaskEnvironment(environment); + + reducer.setJavaOpts(MRHelpers.getReduceJavaOpts(conf)); + + Map localResources = new HashMap(); + localResources.put(getBaseName(appJarLr), appJarLr); + for (LocalResource lr: additionalLr) { + localResources.put(getBaseName(lr), lr); + } + reducer.setTaskLocalResources(localResources); + + return reducer; + } + + /* + * Helper method to create a yarn local resource. 
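+ * The returned record carries the file's URL, size, modification time, type and
+ * visibility, which is what the YARN node managers need in order to fetch and
+ * verify the resource before launching the task.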
+ */ + private LocalResource createLocalResource(FileSystem remoteFs, Path file, + LocalResourceType type, LocalResourceVisibility visibility) { + + FileStatus fstat = null; + try { + fstat = remoteFs.getFileStatus(file); + } catch (IOException e) { + e.printStackTrace(); + } + + URL resourceURL = ConverterUtils.getYarnUrlFromPath(file); + long resourceSize = fstat.getLen(); + long resourceModificationTime = fstat.getModificationTime(); + + LocalResource lr = Records.newRecord(LocalResource.class); + lr.setResource(resourceURL); + lr.setType(type); + lr.setSize(resourceSize); + lr.setVisibility(visibility); + lr.setTimestamp(resourceModificationTime); + + return lr; + } + + /** + * @param conf + * @return path to destination directory on hdfs + * @throws LoginException if we are unable to figure user information + * @throws IOException when any dfs operation fails. + */ + public Path getDefaultDestDir(Configuration conf) throws LoginException, IOException { + UserGroupInformation ugi = ShimLoader.getHadoopShims().getUGIForConf(conf); + String userName = ShimLoader.getHadoopShims().getShortUserName(ugi); + String userPathStr = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_USER_INSTALL_DIR); + Path userPath = new Path(userPathStr); + FileSystem fs = userPath.getFileSystem(conf); + if (!(fs instanceof DistributedFileSystem)) { + throw new IOException(ErrorMsg.INVALID_HDFS_URI.format(userPathStr)); + } + + String jarPathStr = userPathStr + "/" + userName; + String hdfsDirPathStr = jarPathStr; + Path hdfsDirPath = new Path(hdfsDirPathStr); + + FileStatus fstatus = fs.getFileStatus(hdfsDirPath); + if (!fstatus.isDir()) { + throw new IOException(ErrorMsg.INVALID_DIR.format(hdfsDirPath.toString())); + } + + Path retPath = new Path(hdfsDirPath.toString() + "/.hiveJars"); + + fs.mkdirs(retPath); + return retPath; + } + + /** + * Localizes files, archives and jars the user has instructed us + * to provide on the cluster as resources for execution. 
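+ * Concretely: ADD FILE/JAR/ARCHIVE resources and hive.aux.jars.path entries are
+ * collected, copied under hive.jar.directory (or the per-user install directory
+ * when that location is missing or not a directory) and wrapped as LocalResources.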
+ * + * @param conf + * @return List local resources to add to execution + * @throws IOException when hdfs operation fails + * @throws LoginException when getDefaultDestDir fails with the same exception + */ + public List localizeTempFiles(Configuration conf) throws IOException, LoginException { + List tmpResources = new ArrayList(); + + String addedFiles = Utilities.getResourceFiles(conf, SessionState.ResourceType.FILE); + if (StringUtils.isNotBlank(addedFiles)) { + HiveConf.setVar(conf, ConfVars.HIVEADDEDFILES, addedFiles); + } + String addedJars = Utilities.getResourceFiles(conf, SessionState.ResourceType.JAR); + if (StringUtils.isNotBlank(addedJars)) { + HiveConf.setVar(conf, ConfVars.HIVEADDEDJARS, addedJars); + } + String addedArchives = Utilities.getResourceFiles(conf, SessionState.ResourceType.ARCHIVE); + if (StringUtils.isNotBlank(addedArchives)) { + HiveConf.setVar(conf, ConfVars.HIVEADDEDARCHIVES, addedArchives); + } + + String auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEAUXJARS); + + // need to localize the additional jars and files + + // we need the directory on hdfs to which we shall put all these files + String hdfsDirPathStr = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_JAR_DIRECTORY); + Path hdfsDirPath = new Path(hdfsDirPathStr); + FileSystem fs = hdfsDirPath.getFileSystem(conf); + if (!(fs instanceof DistributedFileSystem)) { + throw new IOException(ErrorMsg.INVALID_HDFS_URI.format(hdfsDirPathStr)); + } + + FileStatus fstatus = null; + try { + fstatus = fs.getFileStatus(hdfsDirPath); + } catch (FileNotFoundException fe) { + // do nothing + } + + if ((fstatus == null) || (!fstatus.isDir())) { + Path destDir = getDefaultDestDir(conf); + hdfsDirPathStr = destDir.toString(); + } + + String allFiles = auxJars + "," + addedJars + "," + addedFiles + "," + addedArchives; + String[] allFilesArr = allFiles.split(","); + for (String file : allFilesArr) { + if (!StringUtils.isNotBlank(file)) { + continue; + } + String hdfsFilePathStr = hdfsDirPathStr + "/" + getResourceBaseName(file); + LocalResource localResource = localizeResource(new Path(file), + new Path(hdfsFilePathStr), conf); + tmpResources.add(localResource); + } + + return tmpResources; + } + + // the api that finds the jar being used by this class on disk + public String getExecJarPathLocal () throws URISyntaxException { + // returns the location on disc of the jar of this class. + return DagUtils.class.getProtectionDomain().getCodeSource().getLocation().toURI().toString(); + } + + /* + * Helper function to retrieve the basename of a local resource + */ + public String getBaseName(LocalResource lr) { + return FilenameUtils.getName(lr.getResource().getFile()); + } + + /** + * @param pathStr - the string from which we try to determine the resource base name + * @return the name of the resource from a given path string. + */ + public String getResourceBaseName(String pathStr) { + String[] splits = pathStr.split("/"); + return splits[splits.length - 1]; + } + + /** + * @param src the source file. + * @param dest the destination file. + * @param conf the configuration + * @return true if the file names match else returns false. 
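+ *         A true result means the destination already holds a file with the same
+ *         base name, so localizeResource can skip the copy.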
+ * @throws IOException when any file system related call fails + */ + private boolean checkPreExisting(Path src, Path dest, Configuration conf) + throws IOException { + FileSystem destFS = dest.getFileSystem(conf); + + if (!destFS.exists(dest)) { + return false; + } + FileStatus destStatus = destFS.getFileStatus(dest); + if (destStatus.isDir()) { + return false; + } + + String srcName = getResourceBaseName(src.toString()); + String destName = getResourceBaseName(dest.toString()); + + if (srcName.equals(destName)) { + return true; + } + + return false; + } + + /** + * @param src path to the source for the resource + * @param dest path in hdfs for the resource + * @param conf + * @return localresource from tez localization. + * @throws IOException when any file system related calls fails. + */ + public LocalResource localizeResource(Path src, Path dest, Configuration conf) + throws IOException { + FileSystem destFS = dest.getFileSystem(conf); + if (!(destFS instanceof DistributedFileSystem)) { + throw new IOException(ErrorMsg.INVALID_HDFS_URI.format(dest.toString())); + } + + if (src != null) { + if (!checkPreExisting(src, dest, conf)) { + // copy the src to the destination and create local resource. + // overwrite even if file already exists. + destFS.copyFromLocalFile(false, true, src, dest); + } + } + + return createLocalResource(destFS, dest, LocalResourceType.FILE, + LocalResourceVisibility.APPLICATION); + } + + /** + * Creates and initializes a JobConf object that can be used to execute + * the DAG. The configuration object will contain configurations from mapred-site + * overlaid with key/value pairs from the hiveConf object. Finally it will also + * contain some hive specific configurations that do not change from DAG to DAG. + * + * @param hiveConf Current hiveConf for the execution + * @return JobConf base configuration for job execution + * @throws IOException + */ + public JobConf createConfiguration(HiveConf hiveConf) throws IOException { + hiveConf.setBoolean("mapred.mapper.new-api", false); + + JobConf conf = (JobConf) MRHelpers.getBaseMRConfiguration(hiveConf); + + conf.set("mapred.output.committer.class", NullOutputCommitter.class.getName()); + + conf.setBoolean("mapred.committer.job.setup.cleanup.needed", false); + conf.setBoolean("mapred.committer.job.task.cleanup.needed", false); + + conf.setClass("mapred.output.format.class", HiveOutputFormatImpl.class, OutputFormat.class); + + conf.set(MRJobConfig.OUTPUT_KEY_CLASS, HiveKey.class.getName()); + conf.set(MRJobConfig.OUTPUT_VALUE_CLASS, BytesWritable.class.getName()); + + conf.set("mapred.partitioner.class", HiveConf.getVar(conf, HiveConf.ConfVars.HIVEPARTITIONER)); + conf.set("tez.runtime.partitioner.class", MRPartitioner.class.getName()); + + return conf; + } + + /** + * Creates and initializes the JobConf object for a given BaseWork object. + * + * @param conf Any configurations in conf will be copied to the resulting new JobConf object. + * @param work BaseWork will be used to populate the configuration object. + * @return JobConf new configuration object + */ + public JobConf initializeVertexConf(JobConf conf, BaseWork work) { + + // simply dispatch the call to the right method for the actual (sub-) type of + // BaseWork. + if (work instanceof MapWork) { + return initializeVertexConf(conf, (MapWork)work); + } else if (work instanceof ReduceWork) { + return initializeVertexConf(conf, (ReduceWork)work); + } else { + assert false; + return null; + } + } + + /** + * Create a vertex from a given work object. 
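+ *
+ * A sketch of how the DagUtils pieces are expected to fit together when a DAG is
+ * assembled (hiveConf, mapWork, scratchDir, appJarLr, additionalLr, fs and ctx are
+ * illustrative names supplied by the caller; exception handling omitted):
+ *
+ *   DagUtils utils = DagUtils.getInstance();
+ *   JobConf baseConf = utils.createConfiguration(hiveConf);
+ *   JobConf vertexConf = utils.initializeVertexConf(baseConf, mapWork);
+ *   Vertex mapVertex = utils.createVertex(vertexConf, mapWork, scratchDir,
+ *       appJarLr, additionalLr, fs, ctx, true);
+ *
+ * Edges between such vertices are then wired up with createEdge above.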
+ * + * @param conf JobConf to be used to this execution unit + * @param work The instance of BaseWork representing the actual work to be performed + * by this vertex. + * @param scratchDir HDFS scratch dir for this execution unit. + * @param appJarLr Local resource for hive-exec. + * @param additionalLr + * @param fileSystem FS corresponding to scratchDir and LocalResources + * @param ctx This query's context + * @return Vertex + */ + public Vertex createVertex(JobConf conf, BaseWork work, + Path scratchDir, LocalResource appJarLr, List additionalLr, + FileSystem fileSystem, Context ctx, boolean hasChildren) throws Exception { + + Vertex v = null; + // simply dispatch the call to the right method for the actual (sub-) type of + // BaseWork. + if (work instanceof MapWork) { + v = createVertex(conf, (MapWork) work, appJarLr, + additionalLr, fileSystem, scratchDir, ctx); + } else if (work instanceof ReduceWork) { + v = createVertex(conf, (ReduceWork) work, appJarLr, + additionalLr, fileSystem, scratchDir, ctx); + } else { + // something is seriously wrong if this is happening + throw new HiveException(ErrorMsg.GENERIC_ERROR.getErrorCodedMsg()); + } + + // initialize stats publisher if necessary + if (work.isGatheringStats()) { + StatsPublisher statsPublisher; + StatsFactory factory = StatsFactory.newFactory(conf); + if (factory != null) { + statsPublisher = factory.getStatsPublisher(); + if (!statsPublisher.init(conf)) { // creating stats table if not exists + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_RELIABLE)) { + throw + new HiveException(ErrorMsg.STATSPUBLISHER_INITIALIZATION_ERROR.getErrorCodedMsg()); + } + } + } + } + + + // final vertices need to have at least one output + if (!hasChildren) { + v.addOutput("out_"+work.getName(), + new OutputDescriptor(MROutput.class.getName()) + .setUserPayload(MRHelpers.createUserPayloadFromConf(conf))); + } + + return v; + } + + /** + * createTezDir creates a temporary directory in the scratchDir folder to + * be used with Tez. Assumes scratchDir exists. + */ + public Path createTezDir(Path scratchDir, Configuration conf) + throws IOException { + Path tezDir = getTezDir(scratchDir); + FileSystem fs = tezDir.getFileSystem(conf); + fs.mkdirs(tezDir); + return tezDir; + } + + /** + * Gets the tez dir that belongs to the hive scratch dir + */ + public Path getTezDir(Path scratchDir) { + return new Path(scratchDir, TEZ_DIR); + } + + /** + * Singleton + * @return instance of this class + */ + public static DagUtils getInstance() { + if (instance == null) { + instance = new DagUtils(); + } + return instance; + } + + private DagUtils() { + // don't instantiate + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java (working copy) @@ -0,0 +1,103 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.exec.tez; + +import java.io.IOException; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.MapJoinOperator; +import org.apache.hadoop.hive.ql.exec.MapredContext; +import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; +import org.apache.hadoop.hive.ql.exec.persistence.HashMapWrapper; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinRowContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainerSerDe; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.MapJoinDesc; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.io.Writable; +import org.apache.tez.runtime.api.LogicalInput; +import org.apache.tez.runtime.library.api.KeyValueReader; + +/** + * HashTableLoader for Tez constructs the hashtable from records read from + * a broadcast edge. + */ +public class HashTableLoader implements org.apache.hadoop.hive.ql.exec.HashTableLoader { + + private static final Log LOG = LogFactory.getLog(MapJoinOperator.class.getName()); + + public HashTableLoader() { + } + + @Override + public void load(ExecMapperContext context, + Configuration hconf, + MapJoinDesc desc, + byte posBigTable, + MapJoinTableContainer[] mapJoinTables, + MapJoinTableContainerSerDe[] mapJoinTableSerdes) throws HiveException { + + TezContext tezContext = (TezContext) MapredContext.get(); + Map parentToInput = desc.getParentToInput(); + int hashTableThreshold = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLETHRESHOLD); + float hashTableLoadFactor = HiveConf.getFloatVar(hconf, + HiveConf.ConfVars.HIVEHASHTABLELOADFACTOR); + + for (int pos = 0; pos < mapJoinTables.length; pos++) { + if (pos == posBigTable) { + continue; + } + + LogicalInput input = tezContext.getInput(parentToInput.get(pos)); + + try { + KeyValueReader kvReader = (KeyValueReader) input.getReader(); + + MapJoinTableContainer tableContainer = new HashMapWrapper(hashTableThreshold, + hashTableLoadFactor); + + // simply read all the kv pairs into the hashtable. 
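+ // Each record on the broadcast edge is one row of the small table: the key is
+ // deserialized into a MapJoinKey and the value is appended to that key's
+ // MapJoinRowContainer, using the key/value serde contexts from mapJoinTableSerdes.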
+ while (kvReader.next()) { + MapJoinKey key = new MapJoinKey(); + key.read(mapJoinTableSerdes[pos].getKeyContext(), (Writable)kvReader.getCurrentKey()); + + MapJoinRowContainer values = tableContainer.get(key); + if(values == null){ + values = new MapJoinRowContainer(); + tableContainer.put(key, values); + } + values.read(mapJoinTableSerdes[pos].getValueContext(), (Writable)kvReader.getCurrentValue()); + } + + mapJoinTables[pos] = tableContainer; + } catch (IOException e) { + throw new HiveException(e); + } catch (SerDeException e) { + throw new HiveException(e); + } catch (Exception e) { + throw new HiveException(e); + } + } + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java (working copy) @@ -0,0 +1,250 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.exec.tez; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.ql.exec.HashTableDummyOperator; +import org.apache.hadoop.hive.ql.exec.MapOperator; +import org.apache.hadoop.hive.ql.exec.MapredContext; +import org.apache.hadoop.hive.ql.exec.ObjectCache; +import org.apache.hadoop.hive.ql.exec.ObjectCacheFactory; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.OperatorUtils; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.exec.mr.ExecMapper.reportStats; +import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; +import org.apache.hadoop.hive.ql.exec.vector.VectorMapOperator; +import org.apache.hadoop.hive.ql.log.PerfLogger; +import org.apache.hadoop.hive.ql.plan.MapWork; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.util.StringUtils; +import org.apache.tez.mapreduce.input.MRInputLegacy; +import org.apache.tez.mapreduce.processor.MRTaskReporter; +import org.apache.tez.runtime.api.LogicalInput; +import org.apache.tez.runtime.library.api.KeyValueReader; + +/** + * Process input from tez LogicalInput and write output - for a map plan + * Just pump the records through the query plan. 
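+ * init() rebuilds the MapOperator pipeline from the (possibly cached) MapWork,
+ * run() pulls records from the MRInput reader and pushes them into that pipeline,
+ * and close() shuts the operators down and reports row and memory stats.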
+ */ +public class MapRecordProcessor extends RecordProcessor{ + + + private MapOperator mapOp; + public static final Log l4j = LogFactory.getLog(MapRecordProcessor.class); + private final ExecMapperContext execContext = new ExecMapperContext(); + private boolean abort = false; + protected static final String MAP_PLAN_KEY = "__MAP_PLAN__"; + private MapWork mapWork; + + @Override + void init(JobConf jconf, MRTaskReporter mrReporter, Map inputs, + Map outMap){ + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS); + super.init(jconf, mrReporter, inputs, outMap); + + //Update JobConf using MRInput, info like filename comes via this + MRInputLegacy mrInput = getMRInput(inputs); + try { + mrInput.init(); + } catch (IOException e) { + throw new RuntimeException("Failed while initializing MRInput", e); + } + Configuration updatedConf = mrInput.getConfigUpdates(); + if (updatedConf != null) { + for (Entry entry : updatedConf) { + jconf.set(entry.getKey(), entry.getValue()); + } + } + + ObjectCache cache = ObjectCacheFactory.getCache(jconf); + try { + + execContext.setJc(jconf); + // create map and fetch operators + mapWork = (MapWork) cache.retrieve(MAP_PLAN_KEY); + if (mapWork == null) { + mapWork = Utilities.getMapWork(jconf); + cache.cache(MAP_PLAN_KEY, mapWork); + l4j.info("Plan: "+mapWork); + for (String s: mapWork.getAliases()) { + l4j.info("Alias: "+s); + } + } else { + Utilities.setMapWork(jconf, mapWork); + } + if (mapWork.getVectorMode()) { + mapOp = new VectorMapOperator(); + } else { + mapOp = new MapOperator(); + } + + // initialize map operator + mapOp.setConf(mapWork); + mapOp.setChildren(jconf); + l4j.info(mapOp.dump(0)); + + MapredContext.init(true, new JobConf(jconf)); + ((TezContext)MapredContext.get()).setInputs(inputs); + mapOp.setExecContext(execContext); + mapOp.initializeLocalWork(jconf); + mapOp.initialize(jconf, null); + + // Initialization isn't finished until all parents of all operators + // are initialized. For broadcast joins that means initializing the + // dummy parent operators as well. + List dummyOps = mapWork.getDummyOps(); + if (dummyOps != null) { + for (Operator dummyOp : dummyOps){ + dummyOp.setExecContext(execContext); + dummyOp.initialize(jconf, null); + } + } + + OperatorUtils.setChildrenCollector(mapOp.getChildOperators(), outMap); + mapOp.setReporter(reporter); + MapredContext.get().setReporter(reporter); + + } catch (Throwable e) { + abort = true; + if (e instanceof OutOfMemoryError) { + // will this be true here? 
+ // Don't create a new object if we are already out of memory + throw (OutOfMemoryError) e; + } else { + throw new RuntimeException("Map operator initialization failed", e); + } + } + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS); + } + + private MRInputLegacy getMRInput(Map inputs) { + //there should be only one MRInput + MRInputLegacy theMRInput = null; + for(LogicalInput inp : inputs.values()){ + if(inp instanceof MRInputLegacy){ + if(theMRInput != null){ + throw new IllegalArgumentException("Only one MRInput is expected"); + } + //a better logic would be to find the alias + theMRInput = (MRInputLegacy)inp; + } + } + return theMRInput; + } + + @Override + void run() throws IOException{ + + MRInputLegacy in = getMRInput(inputs); + KeyValueReader reader = in.getReader(); + + //process records until done + while(reader.next()){ + //ignore the key for maps - reader.getCurrentKey(); + Object value = reader.getCurrentValue(); + boolean needMore = processRow(value); + if(!needMore){ + break; + } + } + } + + + /** + * @param value value to process + * @return true if it is not done and can take more inputs + */ + private boolean processRow(Object value) { + // reset the execContext for each new row + execContext.resetRow(); + + try { + if (mapOp.getDone()) { + return false; //done + } else { + // Since there is no concept of a group, we don't invoke + // startGroup/endGroup for a mapper + mapOp.process((Writable)value); + if (isLogInfoEnabled) { + logProgress(); + } + } + } catch (Throwable e) { + abort = true; + if (e instanceof OutOfMemoryError) { + // Don't create a new object if we are already out of memory + throw (OutOfMemoryError) e; + } else { + l4j.fatal(StringUtils.stringifyException(e)); + throw new RuntimeException(e); + } + } + return true; //give me more + } + + @Override + void close(){ + // check if there are IOExceptions + if (!abort) { + abort = execContext.getIoCxt().getIOExceptions(); + } + + // detecting failed executions by exceptions thrown by the operator tree + try { + mapOp.close(abort); + + // Need to close the dummyOps as well. The operator pipeline + // is not considered "closed/done" unless all operators are + // done. For broadcast joins that includes the dummy parents. + List dummyOps = mapWork.getDummyOps(); + if (dummyOps != null) { + for (Operator dummyOp : dummyOps){ + dummyOp.close(abort); + } + } + + if (isLogInfoEnabled) { + logCloseInfo(); + } + reportStats rps = new reportStats(reporter); + mapOp.preorderMap(rps); + return; + } catch (Exception e) { + if (!abort) { + // signal new failure to map-reduce + l4j.error("Hit error while closing operators - failing tree"); + throw new RuntimeException("Hive Runtime Error while closing operators", e); + } + } finally { + Utilities.clearWorkMap(); + MapredContext.close(); + } + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapTezProcessor.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapTezProcessor.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapTezProcessor.java (working copy) @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.exec.tez; + +/** + * Subclass that is used to indicate if this is a map or reduce process + */ +public class MapTezProcessor extends TezProcessor { + public MapTezProcessor(){ + super(true); + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ObjectCache.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ObjectCache.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ObjectCache.java (working copy) @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.tez; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.tez.runtime.common.objectregistry.ObjectLifeCycle; +import org.apache.tez.runtime.common.objectregistry.ObjectRegistry; +import org.apache.tez.runtime.common.objectregistry.ObjectRegistryFactory; + + +/** + * ObjectCache. Tez implementation based on the tez object registry. + * + */ +public class ObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCache { + + private static final Log LOG = LogFactory.getLog(ObjectCache.class.getName()); + private final ObjectRegistry registry = ObjectRegistryFactory.getObjectRegistry(); + + @Override + public void cache(String key, Object value) { + LOG.info("Adding " + key + " to cache with value " + value); + registry.add(ObjectLifeCycle.VERTEX, key, value); + } + + @Override + public Object retrieve(String key) { + Object o = registry.get(key); + if (o != null) { + LOG.info("Found " + key + " in cache with value: " + o); + } + return o; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java (working copy) @@ -0,0 +1,135 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.exec.tez; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryMXBean; +import java.net.URLClassLoader; +import java.util.Arrays; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.log.PerfLogger; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.tez.mapreduce.processor.MRTaskReporter; +import org.apache.tez.runtime.api.LogicalInput; + +/** + * Process input from tez LogicalInput and write output + * It has different subclasses for map and reduce processing + */ +public abstract class RecordProcessor { + + protected JobConf jconf; + protected Map inputs; + protected Map outMap; + + public static final Log l4j = LogFactory.getLog(RecordProcessor.class); + + + // used to log memory usage periodically + public static MemoryMXBean memoryMXBean; + protected boolean isLogInfoEnabled = false; + protected MRTaskReporter reporter; + + private long numRows = 0; + private long nextUpdateCntr = 1; + protected PerfLogger perfLogger = PerfLogger.getPerfLogger(); + protected String CLASS_NAME = RecordProcessor.class.getName(); + + + /** + * Common initialization code for RecordProcessors + * @param jconf + * @param mrReporter + * @param inputs + * @param out + */ + void init(JobConf jconf, MRTaskReporter mrReporter, Map inputs, + Map outMap){ + this.jconf = jconf; + this.reporter = mrReporter; + this.inputs = inputs; + this.outMap = outMap; + + // Allocate the bean at the beginning - + memoryMXBean = ManagementFactory.getMemoryMXBean(); + + l4j.info("maximum memory = " + memoryMXBean.getHeapMemoryUsage().getMax()); + + isLogInfoEnabled = l4j.isInfoEnabled(); + + //log classpaths + try { + if (l4j.isDebugEnabled()) { + l4j.debug("conf classpath = " + + Arrays.asList(((URLClassLoader) jconf.getClassLoader()).getURLs())); + l4j.debug("thread classpath = " + + Arrays.asList(((URLClassLoader) Thread.currentThread() + .getContextClassLoader()).getURLs())); + } + } catch (Exception e) { + l4j.info("cannot get classpath: " + e.getMessage()); + } + } + + /** + * start processing the inputs and writing output + * @throws IOException + */ + abstract void run() throws IOException; + + + abstract void close(); + + /** + * Log information to be logged at the end + */ + protected void logCloseInfo() { + long used_memory = memoryMXBean.getHeapMemoryUsage().getUsed(); + l4j.info("ExecMapper: processed " + numRows + " rows: used memory = " + + used_memory); + } + + /** + * Log number of records processed and memory used after processing many records + */ + protected void logProgress() { + numRows++; + if (numRows == nextUpdateCntr) { + long used_memory = memoryMXBean.getHeapMemoryUsage().getUsed(); + l4j.info("ExecMapper: processing " + numRows + + " rows: used memory = " + used_memory); + 
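+      // Update points grow geometrically (1, 10, 100, ... up to one million rows)
+      // and then every additional million rows, so per-row logging stays cheap.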
nextUpdateCntr = getNextUpdateRecordCounter(numRows); + } + } + + private long getNextUpdateRecordCounter(long cntr) { + // A very simple counter to keep track of number of rows processed by the + // reducer. It dumps + // every 1 million times, and quickly before that + if (cntr >= 1000000) { + return cntr + 1000000; + } + + return 10 * cntr; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java (working copy) @@ -0,0 +1,372 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.exec.tez; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.HashTableDummyOperator; +import org.apache.hadoop.hive.ql.exec.MapredContext; +import org.apache.hadoop.hive.ql.exec.ObjectCache; +import org.apache.hadoop.hive.ql.exec.ObjectCacheFactory; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.OperatorUtils; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.exec.mr.ExecMapper.reportStats; +import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; +import org.apache.hadoop.hive.ql.exec.tez.tools.InputMerger; +import org.apache.hadoop.hive.ql.log.PerfLogger; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.ql.plan.ReduceWork; +import org.apache.hadoop.hive.ql.plan.TableDesc; +import org.apache.hadoop.hive.serde2.Deserializer; +import org.apache.hadoop.hive.serde2.SerDe; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.SerDeUtils; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.tez.mapreduce.processor.MRTaskReporter; +import org.apache.tez.runtime.api.LogicalInput; +import org.apache.tez.runtime.library.api.KeyValuesReader; +import org.apache.tez.runtime.library.input.ShuffledMergedInput; + +/** + * Process input from tez LogicalInput and write output - for a map plan + * 
Just pump the records through the query plan. + */ +public class ReduceRecordProcessor extends RecordProcessor{ + + private static final String REDUCE_PLAN_KEY = "__REDUCE_PLAN__"; + + public static final Log l4j = LogFactory.getLog(ReduceRecordProcessor.class); + private final ExecMapperContext execContext = new ExecMapperContext(); + private boolean abort = false; + private Deserializer inputKeyDeserializer; + + // Input value serde needs to be an array to support different SerDe + // for different tags + private final SerDe[] inputValueDeserializer = new SerDe[Byte.MAX_VALUE]; + + TableDesc keyTableDesc; + TableDesc[] valueTableDesc; + + ObjectInspector[] rowObjectInspector; + private Operator reducer; + private boolean isTagged = false; + + private Object keyObject = null; + private BytesWritable groupKey; + + private ReduceWork redWork; + + List row = new ArrayList(Utilities.reduceFieldNameList.size()); + + @Override + void init(JobConf jconf, MRTaskReporter mrReporter, Map inputs, + Map outMap){ + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS); + super.init(jconf, mrReporter, inputs, outMap); + + ObjectCache cache = ObjectCacheFactory.getCache(jconf); + + rowObjectInspector = new ObjectInspector[Byte.MAX_VALUE]; + ObjectInspector[] valueObjectInspector = new ObjectInspector[Byte.MAX_VALUE]; + ObjectInspector keyObjectInspector; + + redWork = (ReduceWork) cache.retrieve(REDUCE_PLAN_KEY); + if (redWork == null) { + redWork = Utilities.getReduceWork(jconf); + cache.cache(REDUCE_PLAN_KEY, redWork); + } else { + Utilities.setReduceWork(jconf, redWork); + } + + reducer = redWork.getReducer(); + reducer.setParentOperators(null); // clear out any parents as reducer is the + // root + isTagged = redWork.getNeedsTagging(); + try { + keyTableDesc = redWork.getKeyDesc(); + inputKeyDeserializer = (SerDe) ReflectionUtils.newInstance(keyTableDesc + .getDeserializerClass(), null); + inputKeyDeserializer.initialize(null, keyTableDesc.getProperties()); + keyObjectInspector = inputKeyDeserializer.getObjectInspector(); + valueTableDesc = new TableDesc[redWork.getTagToValueDesc().size()]; + for (int tag = 0; tag < redWork.getTagToValueDesc().size(); tag++) { + // We should initialize the SerDe with the TypeInfo when available. + valueTableDesc[tag] = redWork.getTagToValueDesc().get(tag); + inputValueDeserializer[tag] = (SerDe) ReflectionUtils.newInstance( + valueTableDesc[tag].getDeserializerClass(), null); + inputValueDeserializer[tag].initialize(null, valueTableDesc[tag] + .getProperties()); + valueObjectInspector[tag] = inputValueDeserializer[tag] + .getObjectInspector(); + + ArrayList ois = new ArrayList(); + ois.add(keyObjectInspector); + ois.add(valueObjectInspector[tag]); + rowObjectInspector[tag] = ObjectInspectorFactory + .getStandardStructObjectInspector(Utilities.reduceFieldNameList, ois); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + + MapredContext.init(false, new JobConf(jconf)); + ((TezContext)MapredContext.get()).setInputs(inputs); + + // initialize reduce operator tree + try { + l4j.info(reducer.dump(0)); + reducer.initialize(jconf, rowObjectInspector); + + // Initialization isn't finished until all parents of all operators + // are initialized. For broadcast joins that means initializing the + // dummy parent operators as well. 
+ List dummyOps = redWork.getDummyOps(); + if (dummyOps != null) { + for (Operator dummyOp : dummyOps){ + dummyOp.setExecContext(execContext); + dummyOp.initialize(jconf, null); + } + } + + // set output collector for any reduce sink operators in the pipeline. + List> children = new LinkedList>(); + children.add(reducer); + if (dummyOps != null) { + children.addAll(dummyOps); + } + OperatorUtils.setChildrenCollector(children, outMap); + + reducer.setReporter(reporter); + MapredContext.get().setReporter(reporter); + + } catch (Throwable e) { + abort = true; + if (e instanceof OutOfMemoryError) { + // Don't create a new object if we are already out of memory + throw (OutOfMemoryError) e; + } else { + throw new RuntimeException("Reduce operator initialization failed", e); + } + } + + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS); + } + + @Override + void run() throws IOException{ + List shuffleInputs = getShuffleInputs(inputs); + KeyValuesReader kvsReader; + + if(shuffleInputs.size() == 1){ + //no merging of inputs required + kvsReader = shuffleInputs.get(0).getReader(); + }else { + //get a sort merged input + kvsReader = new InputMerger(shuffleInputs); + } + + while(kvsReader.next()){ + Object key = kvsReader.getCurrentKey(); + Iterable values = kvsReader.getCurrentValues(); + boolean needMore = processKeyValues(key, values); + if(!needMore){ + break; + } + } + + } + + /** + * Get the inputs that should be streamed through reduce plan. + * @param inputs + * @return + */ + private List getShuffleInputs(Map inputs) { + //the reduce plan inputs have tags, add all inputs that have tags + Map tag2input = redWork.getTagToInput(); + ArrayList shuffleInputs = new ArrayList(); + for(String inpStr : tag2input.values()){ + shuffleInputs.add((ShuffledMergedInput)inputs.get(inpStr)); + } + return shuffleInputs; + } + + /** + * @param key + * @param values + * @return true if it is not done and can take more inputs + */ + private boolean processKeyValues(Object key, Iterable values) { + if(reducer.getDone()){ + //done - no more records needed + return false; + } + + // reset the execContext for each new row + execContext.resetRow(); + + try { + BytesWritable keyWritable = (BytesWritable) key; + + byte tag = 0; + if (isTagged) { + // remove the tag from key coming out of reducer + // and store it in separate variable. 
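+          // The tag is carried in the last byte of the serialized key, i.e. the
+          // key bytes look like [ key | tag ]; shrinking the writable by one strips it.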
+ int size = keyWritable.getSize() - 1; + tag = keyWritable.get()[size]; + keyWritable.setSize(size); + } + + //Set the key, check if this is a new group or same group + if (!keyWritable.equals(groupKey)) { + // If a operator wants to do some work at the beginning of a group + if (groupKey == null) { // the first group + groupKey = new BytesWritable(); + } else { + // If a operator wants to do some work at the end of a group + l4j.trace("End Group"); + reducer.endGroup(); + } + + try { + keyObject = inputKeyDeserializer.deserialize(keyWritable); + } catch (Exception e) { + throw new HiveException( + "Hive Runtime Error: Unable to deserialize reduce input key from " + + Utilities.formatBinaryString(keyWritable.get(), 0, + keyWritable.getSize()) + " with properties " + + keyTableDesc.getProperties(), e); + } + + groupKey.set(keyWritable.get(), 0, keyWritable.getSize()); + l4j.trace("Start Group"); + reducer.startGroup(); + reducer.setGroupKeyObject(keyObject); + } + + //process all the values we have for this key + Iterator valuesIt = values.iterator(); + while (valuesIt.hasNext()) { + BytesWritable valueWritable = (BytesWritable) valuesIt.next(); + Object valueObj; + try { + valueObj = inputValueDeserializer[tag].deserialize(valueWritable); + } catch (SerDeException e) { + throw new HiveException( + "Hive Runtime Error: Unable to deserialize reduce input value (tag=" + + tag + + ") from " + + Utilities.formatBinaryString(valueWritable.get(), 0, + valueWritable.getSize()) + " with properties " + + valueTableDesc[tag].getProperties(), e); + } + row.clear(); + row.add(keyObject); + row.add(valueObj); + + try { + reducer.processOp(row, tag); + } catch (Exception e) { + String rowString = null; + try { + rowString = SerDeUtils.getJSONString(row, rowObjectInspector[tag]); + } catch (Exception e2) { + rowString = "[Error getting row data with exception " + + StringUtils.stringifyException(e2) + " ]"; + } + throw new HiveException("Hive Runtime Error while processing row (tag=" + + tag + ") " + rowString, e); + } + if (isLogInfoEnabled) { + logProgress(); + } + } + + } catch (Throwable e) { + abort = true; + if (e instanceof OutOfMemoryError) { + // Don't create a new object if we are already out of memory + throw (OutOfMemoryError) e; + } else { + l4j.fatal(StringUtils.stringifyException(e)); + throw new RuntimeException(e); + } + } + return true; //give me more + } + + @Override + void close(){ + // check if there are IOExceptions + if (!abort) { + abort = execContext.getIoCxt().getIOExceptions(); + } + + try { + if (groupKey != null) { + // If a operator wants to do some work at the end of a group + l4j.trace("End Group"); + reducer.endGroup(); + } + if (isLogInfoEnabled) { + logCloseInfo(); + } + + reducer.close(abort); + + // Need to close the dummyOps as well. The operator pipeline + // is not considered "closed/done" unless all operators are + // done. For broadcast joins that includes the dummy parents. 
+ List dummyOps = redWork.getDummyOps(); + if (dummyOps != null) { + for (Operator dummyOp : dummyOps){ + dummyOp.close(abort); + } + } + reportStats rps = new reportStats(reporter); + reducer.preorderMap(rps); + + } catch (Exception e) { + if (!abort) { + // signal new failure to map-reduce + l4j.error("Hit error while closing operators - failing tree"); + throw new RuntimeException("Hive Runtime Error while closing operators: " + + e.getMessage(), e); + } + } finally { + Utilities.clearWorkMap(); + MapredContext.close(); + } + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceTezProcessor.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceTezProcessor.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceTezProcessor.java (working copy) @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.exec.tez; + +/** + * Subclass that is used to indicate if this is a map or reduce process + */ +public class ReduceTezProcessor extends TezProcessor { + public ReduceTezProcessor(){ + super(false); + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezContext.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezContext.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezContext.java (working copy) @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.exec.tez; + +import java.util.Map; + +import org.apache.hadoop.hive.ql.exec.MapredContext; +import org.apache.hadoop.mapred.JobConf; +import org.apache.tez.runtime.api.LogicalInput; + +/** + * TezContext contains additional context only available with Tez + */ +public class TezContext extends MapredContext { + + // all the inputs for the tez processor + private Map inputs; + + public TezContext(boolean isMap, JobConf jobConf) { + super(isMap, jobConf); + } + + public void setInputs(Map inputs) { + this.inputs = inputs; + } + + public LogicalInput getInput(String name) { + if (inputs == null) { + return null; + } + return inputs.get(name); + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java (working copy) @@ -0,0 +1,232 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.tez; + +import static org.apache.tez.dag.api.client.DAGStatus.State.RUNNING; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.log.PerfLogger; +import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; +import org.apache.tez.dag.api.TezException; +import org.apache.tez.dag.api.client.DAGClient; +import org.apache.tez.dag.api.client.DAGStatus; +import org.apache.tez.dag.api.client.Progress; +import org.apache.tez.dag.api.client.StatusGetOpts; + +/** + * TezJobMonitor keeps track of a tez job while it's being executed. It will + * print status to the console and retrieve final status of the job after + * completion. 
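+ * A JVM shutdown hook makes a best-effort attempt to kill any in-flight DAGs
+ * and close open Tez sessions if the client exits while a job is still running.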
+ */ +public class TezJobMonitor { + + private static final Log LOG = LogFactory.getLog(TezJobMonitor.class.getName()); + private static final String CLASS_NAME = TezJobMonitor.class.getName(); + + private transient LogHelper console; + private final PerfLogger perfLogger = PerfLogger.getPerfLogger(); + private final int checkInterval = 200; + private final int maxRetryInterval = 2500; + private final int printInterval = 3000; + private long lastPrintTime; + private Set completed; + private static final List shutdownList; + + static { + shutdownList = Collections.synchronizedList(new LinkedList()); + Runtime.getRuntime().addShutdownHook(new Thread() { + @Override + public void run() { + for (DAGClient c: shutdownList) { + try { + System.err.println("Trying to shutdown DAG"); + c.tryKillDAG(); + } catch (Exception e) { + // ignore + } + } + try { + for (TezSessionState s: TezSessionState.getOpenSessions()) { + System.err.println("Shutting down tez session."); + s.close(false); + } + } catch (Exception e) { + // ignore + } + } + }); + } + + public TezJobMonitor() { + console = new LogHelper(LOG); + } + + /** + * monitorExecution handles status printing, failures during execution and final + * status retrieval. + * + * @param dagClient client that was used to kick off the job + * @return int 0 - success, 1 - killed, 2 - failed + */ + public int monitorExecution(final DAGClient dagClient) throws InterruptedException { + DAGStatus status = null; + completed = new HashSet(); + + boolean running = false; + boolean done = false; + int failedCounter = 0; + int rc = 0; + DAGStatus.State lastState = null; + String lastReport = null; + Set opts = new HashSet(); + + shutdownList.add(dagClient); + + console.printInfo("\n"); + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_RUN_DAG); + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_SUBMIT_TO_RUNNING); + + while(true) { + + try { + status = dagClient.getDAGStatus(opts); + Map progressMap = status.getVertexProgress(); + DAGStatus.State state = status.getState(); + + if (state != lastState || state == RUNNING) { + lastState = state; + + switch(state) { + case SUBMITTED: + console.printInfo("Status: Submitted"); + break; + case INITING: + console.printInfo("Status: Initializing"); + break; + case RUNNING: + if (!running) { + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_SUBMIT_TO_RUNNING); + console.printInfo("Status: Running (application id: " + +dagClient.getApplicationId()+")\n"); + for (String s: progressMap.keySet()) { + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_RUN_VERTEX + s); + } + running = true; + } + + lastReport = printStatus(progressMap, lastReport, console); + break; + case SUCCEEDED: + lastReport = printStatus(progressMap, lastReport, console); + console.printInfo("Status: Finished successfully"); + running = false; + done = true; + break; + case KILLED: + console.printInfo("Status: Killed"); + running = false; + done = true; + rc = 1; + break; + case FAILED: + case ERROR: + console.printError("Status: Failed"); + running = false; + done = true; + rc = 2; + break; + } + } + if (!done) { + Thread.sleep(checkInterval); + } + } catch (Exception e) { + console.printInfo("Exception: "+e.getMessage()); + if (++failedCounter % maxRetryInterval/checkInterval == 0 + || e instanceof InterruptedException) { + try { + console.printInfo("Killing DAG..."); + dagClient.tryKillDAG(); + } catch(IOException io) { + // best effort + } catch(TezException te) { + // best effort + } + e.printStackTrace(); + console.printError("Execution has 
failed."); + rc = 1; + done = true; + } else { + console.printInfo("Retrying..."); + } + } finally { + if (done) { + if (rc != 0 && status != null) { + for (String diag: status.getDiagnostics()) { + console.printError(diag); + } + } + shutdownList.remove(dagClient); + break; + } + } + } + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_RUN_DAG); + return rc; + } + + private String printStatus(Map progressMap, String lastReport, LogHelper console) { + StringBuffer reportBuffer = new StringBuffer(); + + SortedSet keys = new TreeSet(progressMap.keySet()); + for (String s: keys) { + Progress progress = progressMap.get(s); + int complete = progress.getSucceededTaskCount(); + int total = progress.getTotalTaskCount(); + if (total <= 0) { + reportBuffer.append(String.format("%s: -/-\t", s, complete, total)); + } else { + if (complete == total && !completed.contains(s)) { + completed.add(s); + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_RUN_VERTEX + s); + } + reportBuffer.append(String.format("%s: %d/%d\t", s, complete, total)); + } + } + + String report = reportBuffer.toString(); + if (!report.equals(lastReport) || System.currentTimeMillis() >= lastPrintTime + printInterval) { + console.printInfo(report); + lastPrintTime = System.currentTimeMillis(); + } + + return report; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java (working copy) @@ -0,0 +1,168 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.exec.tez; +import java.io.IOException; +import java.text.NumberFormat; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.ql.exec.tez.TezProcessor.KVOutputCollector; +import org.apache.hadoop.hive.ql.log.PerfLogger; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.tez.common.TezUtils; +import org.apache.tez.mapreduce.processor.MRTaskReporter; +import org.apache.tez.runtime.api.Event; +import org.apache.tez.runtime.api.LogicalIOProcessor; +import org.apache.tez.runtime.api.LogicalInput; +import org.apache.tez.runtime.api.LogicalOutput; +import org.apache.tez.runtime.api.TezProcessorContext; +import org.apache.tez.runtime.library.api.KeyValueWriter; + +/** + * Hive processor for Tez that forms the vertices in Tez and processes the data. + * Does what ExecMapper and ExecReducer does for hive in MR framework. 
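+ * Delegates the actual record handling to MapRecordProcessor or
+ * ReduceRecordProcessor depending on whether the vertex is a map or a reduce.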
+ */ +public class TezProcessor implements LogicalIOProcessor { + private static final Log LOG = LogFactory.getLog(TezProcessor.class); + private boolean isMap = false; + + RecordProcessor rproc = null; + + private JobConf jobConf; + + private static final String CLASS_NAME = TezProcessor.class.getName(); + private final PerfLogger perfLogger = PerfLogger.getPerfLogger(); + + private TezProcessorContext processorContext; + + protected static final NumberFormat taskIdFormat = NumberFormat.getInstance(); + protected static final NumberFormat jobIdFormat = NumberFormat.getInstance(); + static { + taskIdFormat.setGroupingUsed(false); + taskIdFormat.setMinimumIntegerDigits(6); + jobIdFormat.setGroupingUsed(false); + jobIdFormat.setMinimumIntegerDigits(4); + } + + public TezProcessor(boolean isMap) { + this.isMap = isMap; + } + + @Override + public void close() throws IOException { + if(rproc != null){ + rproc.close(); + } + } + + @Override + public void handleEvents(List arg0) { + //this is not called by tez, so nothing to be done here + } + + @Override + public void initialize(TezProcessorContext processorContext) + throws IOException { + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_INITIALIZE_PROCESSOR); + this.processorContext = processorContext; + //get the jobconf + byte[] userPayload = processorContext.getUserPayload(); + Configuration conf = TezUtils.createConfFromUserPayload(userPayload); + this.jobConf = new JobConf(conf); + setupMRLegacyConfigs(processorContext); + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_INITIALIZE_PROCESSOR); + } + + private void setupMRLegacyConfigs(TezProcessorContext processorContext) { + // Hive "insert overwrite local directory" uses task id as dir name + // Setting the id in jobconf helps to have the similar dir name as MR + StringBuilder taskAttemptIdBuilder = new StringBuilder("task"); + taskAttemptIdBuilder.append(processorContext.getApplicationId().getClusterTimestamp()) + .append("_") + .append(jobIdFormat.format(processorContext.getApplicationId().getId())) + .append("_"); + if (isMap) { + taskAttemptIdBuilder.append("m_"); + } else { + taskAttemptIdBuilder.append("r_"); + } + taskAttemptIdBuilder.append(taskIdFormat.format(processorContext.getTaskIndex())) + .append("_") + .append(processorContext.getTaskAttemptNumber()); + + // In MR, mapreduce.task.attempt.id is same as mapred.task.id. Go figure. 
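+    // For example, attempt 0 of the third map task of application id 4 would be
+    // rendered as something like task1388534400000_0004_m_000002_0.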
+ String taskAttemptIdStr = taskAttemptIdBuilder.toString(); + this.jobConf.set("mapred.task.id", taskAttemptIdStr); + this.jobConf.set("mapreduce.task.attempt.id", taskAttemptIdStr); + } + + @Override + public void run(Map inputs, Map outputs) + throws Exception { + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_RUN_PROCESSOR); + // in case of broadcast-join read the broadcast edge inputs + // (possibly asynchronously) + + LOG.info("Running map: " + processorContext.getUniqueIdentifier()); + + Map outMap = new HashMap(); + + for (String outputName: outputs.keySet()) { + LOG.info("Handling output: " + outputName); + KeyValueWriter kvWriter = (KeyValueWriter) outputs.get(outputName).getWriter(); + OutputCollector collector = new KVOutputCollector(kvWriter); + outMap.put(outputName, collector); + } + + if(isMap){ + rproc = new MapRecordProcessor(); + } + else{ + rproc = new ReduceRecordProcessor(); + } + + MRTaskReporter mrReporter = new MRTaskReporter(processorContext); + rproc.init(jobConf, mrReporter, inputs, outMap); + rproc.run(); + + //done - output does not need to be committed as hive does not use outputcommitter + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_RUN_PROCESSOR); + } + + /** + * KVOutputCollector. OutputCollector that writes using KVWriter + * + */ + static class KVOutputCollector implements OutputCollector { + private final KeyValueWriter output; + + KVOutputCollector(KeyValueWriter output) { + this.output = output; + } + + public void collect(Object key, Object value) throws IOException { + output.write(key, value); + } + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java (working copy) @@ -0,0 +1,285 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.exec.tez; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import javax.security.auth.login.LoginException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.tez.client.AMConfiguration; +import org.apache.tez.client.TezSession; +import org.apache.tez.client.TezSessionConfiguration; +import org.apache.tez.dag.api.SessionNotRunning; +import org.apache.tez.dag.api.TezConfiguration; +import org.apache.tez.dag.api.TezException; +import org.apache.tez.mapreduce.hadoop.MRHelpers; + +/** + * Holds session state related to Tez + */ +public class TezSessionState { + + private static final Log LOG = LogFactory.getLog(TezSessionState.class.getName()); + private static final String TEZ_DIR = "_tez_session_dir"; + + private HiveConf conf; + private Path tezScratchDir; + private LocalResource appJarLr; + private TezSession session; + private String sessionId; + private DagUtils utils; + + private static List openSessions + = Collections.synchronizedList(new LinkedList()); + + /** + * Constructor. We do not automatically connect, because we only want to + * load tez classes when the user has tez installed. + */ + public TezSessionState(DagUtils utils) { + this.utils = utils; + } + + /** + * Constructor. We do not automatically connect, because we only want to + * load tez classes when the user has tez installed. + */ + public TezSessionState() { + this(DagUtils.getInstance()); + } + + /** + * Returns whether a session has been established + */ + public boolean isOpen() { + return session != null; + } + + /** + * Get all open sessions. Only used to clean up at shutdown. + * @return List + */ + public static List getOpenSessions() { + return openSessions; + } + + /** + * Creates a tez session. A session is tied to either a cli/hs2 session. You can + * submit multiple DAGs against a session (as long as they are executed serially). + * @throws IOException + * @throws URISyntaxException + * @throws LoginException + * @throws TezException + */ + public void open(String sessionId, HiveConf conf) + throws IOException, LoginException, URISyntaxException, TezException { + + this.sessionId = sessionId; + this.conf = conf; + + // create the tez tmp dir + tezScratchDir = createTezDir(sessionId); + + // generate basic tez config + TezConfiguration tezConfig = new TezConfiguration(conf); + + tezConfig.set(TezConfiguration.TEZ_AM_STAGING_DIR, tezScratchDir.toUri().toString()); + + // unless already installed on all the cluster nodes, we'll have to + // localize hive-exec.jar as well. 
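+    // The helper reuses a jar with a matching name under HIVE_JAR_DIRECTORY when
+    // one exists, otherwise it copies the local hive-exec jar there (or to the
+    // user install directory if the configured directory is unusable).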
+ appJarLr = createHiveExecLocalResource(); + + // configuration for the application master + Map commonLocalResources = new HashMap(); + commonLocalResources.put(utils.getBaseName(appJarLr), appJarLr); + + AMConfiguration amConfig = new AMConfiguration(null, commonLocalResources, + tezConfig, null); + + // configuration for the session + TezSessionConfiguration sessionConfig = new TezSessionConfiguration(amConfig, tezConfig); + + // and finally we're ready to create and start the session + session = new TezSession("HIVE-"+sessionId, sessionConfig); + + LOG.info("Opening new Tez Session (id: "+sessionId+", scratch dir: "+tezScratchDir+")"); + session.start(); + + // In case we need to run some MR jobs, we'll run them under tez MR emulation. The session + // id is used for tez to reuse the current session rather than start a new one. + conf.set("mapreduce.framework.name", "yarn-tez"); + conf.set("mapreduce.tez.session.tokill-application-id", session.getApplicationId().toString()); + + openSessions.add(this); + } + + /** + * Close a tez session. Will cleanup any tez/am related resources. After closing a session + * no further DAGs can be executed against it. + * @throws IOException + * @throws TezException + */ + public void close(boolean keepTmpDir) throws TezException, IOException { + if (!isOpen()) { + return; + } + + LOG.info("Closing Tez Session"); + try { + session.stop(); + openSessions.remove(this); + } catch (SessionNotRunning nr) { + // ignore + } + + if (!keepTmpDir) { + FileSystem fs = tezScratchDir.getFileSystem(conf); + fs.delete(tezScratchDir, true); + } + session = null; + tezScratchDir = null; + conf = null; + appJarLr = null; + } + + public String getSessionId() { + return sessionId; + } + + public TezSession getSession() { + return session; + } + + public Path getTezScratchDir() { + return tezScratchDir; + } + + public LocalResource getAppJarLr() { + return appJarLr; + } + + /** + * createTezDir creates a temporary directory in the scratchDir folder to + * be used with Tez. Assumes scratchDir exists. + */ + private Path createTezDir(String sessionId) + throws IOException { + + // tez needs its own scratch dir (per session) + Path tezDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR), + TEZ_DIR); + tezDir = new Path(tezDir, sessionId); + FileSystem fs = tezDir.getFileSystem(conf); + fs.mkdirs(tezDir); + + // don't keep the directory around on non-clean exit + fs.deleteOnExit(tezDir); + + return tezDir; + } + + /** + * Returns a local resource representing the hive-exec jar. This resource will + * be used to execute the plan on the cluster. + * @param conf + * @return LocalResource corresponding to the localized hive exec resource. + * @throws IOException when any file system related call fails. + * @throws LoginException when we are unable to determine the user. + * @throws URISyntaxException when current jar location cannot be determined. 
+ */ + private LocalResource createHiveExecLocalResource() + throws IOException, LoginException, URISyntaxException { + String hiveJarDir = conf.getVar(HiveConf.ConfVars.HIVE_JAR_DIRECTORY); + String currentVersionPathStr = utils.getExecJarPathLocal(); + String currentJarName = utils.getResourceBaseName(currentVersionPathStr); + FileSystem fs = null; + Path jarPath = null; + FileStatus dirStatus = null; + + if (hiveJarDir != null) { + // check if it is a valid directory in HDFS + Path hiveJarDirPath = new Path(hiveJarDir); + fs = hiveJarDirPath.getFileSystem(conf); + + if (!(fs instanceof DistributedFileSystem)) { + throw new IOException(ErrorMsg.INVALID_HDFS_URI.format(hiveJarDir)); + } + + try { + dirStatus = fs.getFileStatus(hiveJarDirPath); + } catch (FileNotFoundException fe) { + // do nothing + } + if ((dirStatus != null) && (dirStatus.isDir())) { + FileStatus[] listFileStatus = fs.listStatus(hiveJarDirPath); + for (FileStatus fstatus : listFileStatus) { + String jarName = utils.getResourceBaseName(fstatus.getPath().toString()); + if (jarName.equals(currentJarName)) { + // we have found the jar we need. + jarPath = fstatus.getPath(); + return utils.localizeResource(null, jarPath, conf); + } + } + + // jar wasn't in the directory, copy the one in current use + if (jarPath == null) { + Path dest = new Path(hiveJarDir + "/" + currentJarName); + return utils.localizeResource(new Path(currentVersionPathStr), dest, conf); + } + } + } + + /* + * specified location does not exist or is not a directory + * try to push the jar to the hdfs location pointed by + * config variable HIVE_INSTALL_DIR. Path will be + * HIVE_INSTALL_DIR/{username}/.hiveJars/ + */ + if ((hiveJarDir == null) || (dirStatus == null) || + ((dirStatus != null) && (!dirStatus.isDir()))) { + Path dest = utils.getDefaultDestDir(conf); + String destPathStr = dest.toString(); + String jarPathStr = destPathStr + "/" + currentJarName; + dirStatus = fs.getFileStatus(dest); + if (dirStatus.isDir()) { + return utils.localizeResource(new Path(currentVersionPathStr), new Path(jarPathStr), conf); + } else { + throw new IOException(ErrorMsg.INVALID_DIR.format(dest.toString())); + } + } + + // we couldn't find any valid locations. Throw exception + throw new IOException(ErrorMsg.NO_VALID_LOCATIONS.getMsg()); + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java (working copy) @@ -0,0 +1,299 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.tez; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import javax.security.auth.login.LoginException; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.DriverContext; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.log.PerfLogger; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.BaseWork; +import org.apache.hadoop.hive.ql.plan.TezWork; +import org.apache.hadoop.hive.ql.plan.TezWork.EdgeType; +import org.apache.hadoop.hive.ql.plan.api.StageType; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.tez.client.TezSession; +import org.apache.tez.common.counters.TezCounters; +import org.apache.tez.dag.api.DAG; +import org.apache.tez.dag.api.Edge; +import org.apache.tez.dag.api.SessionNotRunning; +import org.apache.tez.dag.api.TezException; +import org.apache.tez.dag.api.Vertex; +import org.apache.tez.dag.api.client.DAGClient; +import org.apache.tez.dag.api.client.StatusGetOpts; + +/** + * + * TezTask handles the execution of TezWork. Currently it executes a graph of map and reduce work + * using the Tez APIs directly. + * + */ +@SuppressWarnings({"serial", "deprecation"}) +public class TezTask extends Task { + + private static final String CLASS_NAME = TezTask.class.getName(); + private final PerfLogger perfLogger = PerfLogger.getPerfLogger(); + + private TezCounters counters; + + private DagUtils utils; + + public TezTask() { + this(DagUtils.getInstance()); + } + + public TezTask(DagUtils utils) { + super(); + this.utils = utils; + } + + public TezCounters getTezCounters() { + return counters; + } + + @Override + public int execute(DriverContext driverContext) { + int rc = 1; + boolean cleanContext = false; + Context ctx = null; + DAGClient client = null; + TezSessionState session = null; + + // Tez requires us to use RPC for the query plan + HiveConf.setBoolVar(conf, ConfVars.HIVE_RPC_QUERY_PLAN, true); + + try { + // Get or create Context object. If we create it we have to clean + // it later as well. + ctx = driverContext.getCtx(); + if (ctx == null) { + ctx = new Context(conf); + cleanContext = true; + } + + // Need to remove this static hack. But this is the way currently to + // get a session. + SessionState ss = SessionState.get(); + session = ss.getTezSession(); + + // if we don't have one yet create it. + if (session == null) { + ss.setTezSession(new TezSessionState()); + } + + // if it's not running start it. + if (!session.isOpen()) { + // can happen if the user sets the tez flag after the session was + // established + LOG.info("Tez session hasn't been created yet. Opening session"); + session.open(ss.getSessionId(), conf); + } + + // we will localize all the files (jars, plans, hashtables) to the + // scratch dir. let's create this first. 
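+      // From here the flow is: set up the scratch and tez dirs, build the JobConf,
+      // translate the TezWork into a DAG, submit it on the session and monitor
+      // progress until the DAG finishes.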
+ Path scratchDir = ctx.getMRScratchDir(); + + // create the tez tmp dir + utils.createTezDir(scratchDir, conf); + + // jobConf will hold all the configuration for hadoop, tez, and hive + JobConf jobConf = utils.createConfiguration(conf); + + // unless already installed on all the cluster nodes, we'll have to + // localize hive-exec.jar as well. + LocalResource appJarLr = session.getAppJarLr(); + + // next we translate the TezWork to a Tez DAG + DAG dag = build(jobConf, work, scratchDir, appJarLr, ctx); + + // submit will send the job to the cluster and start executing + client = submit(jobConf, dag, scratchDir, appJarLr, session); + + // finally monitor will print progress until the job is done + TezJobMonitor monitor = new TezJobMonitor(); + rc = monitor.monitorExecution(client); + + // fetch the counters + Set statusGetOpts = EnumSet.of(StatusGetOpts.GET_COUNTERS); + counters = client.getDAGStatus(statusGetOpts).getDAGCounters(); + + } catch (Exception e) { + LOG.error("Failed to execute tez graph.", e); + // rc will be 1 at this point indicating failure. + } finally { + Utilities.clearWork(conf); + if (cleanContext) { + try { + ctx.clear(); + } catch (Exception e) { + /*best effort*/ + LOG.warn("Failed to clean up after tez job"); + } + } + // need to either move tmp files or remove them + if (client != null) { + // rc will only be overwritten if close errors out + rc = close(work, rc); + } + } + return rc; + } + + DAG build(JobConf conf, TezWork work, Path scratchDir, + LocalResource appJarLr, Context ctx) + throws Exception { + + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_BUILD_DAG); + Map workToVertex = new HashMap(); + Map workToConf = new HashMap(); + + // we need to get the user specified local resources for this dag + List additionalLr = utils.localizeTempFiles(conf); + + // getAllWork returns a topologically sorted list, which we use to make + // sure that vertices are created before they are used in edges. 
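+    // The list is reversed so that child work comes before its parents; each child
+    // vertex then already exists in workToVertex when the edge from its parent is added.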
+ List ws = work.getAllWork(); + Collections.reverse(ws); + + Path tezDir = utils.getTezDir(scratchDir); + FileSystem fs = tezDir.getFileSystem(conf); + + // the name of the dag is what is displayed in the AM/Job UI + DAG dag = new DAG( + Utilities.abbreviate(HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYSTRING), + HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVEJOBNAMELENGTH))); + + for (BaseWork w: ws) { + + boolean isFinal = work.getLeaves().contains(w); + + // translate work to vertex + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_CREATE_VERTEX + w.getName()); + JobConf wxConf = utils.initializeVertexConf(conf, w); + Vertex wx = utils.createVertex(wxConf, w, tezDir, + appJarLr, additionalLr, fs, ctx, !isFinal); + dag.addVertex(wx); + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_CREATE_VERTEX + w.getName()); + workToVertex.put(w, wx); + workToConf.put(w, wxConf); + + // add all dependencies (i.e.: edges) to the graph + for (BaseWork v: work.getChildren(w)) { + assert workToVertex.containsKey(v); + Edge e = null; + + EdgeType edgeType = work.getEdgeProperty(w, v); + + e = utils.createEdge(wxConf, wx, workToConf.get(v), workToVertex.get(v), edgeType); + dag.addEdge(e); + } + } + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_BUILD_DAG); + return dag; + } + + DAGClient submit(JobConf conf, DAG dag, Path scratchDir, + LocalResource appJarLr, TezSessionState sessionState) + throws IOException, TezException, InterruptedException, + LoginException, URISyntaxException, HiveException { + + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_SUBMIT_DAG); + DAGClient dagClient = null; + + try { + // ready to start execution on the cluster + dagClient = sessionState.getSession().submitDAG(dag); + } catch (SessionNotRunning nr) { + console.printInfo("Tez session was closed. Reopening..."); + + // close the old one, but keep the tmp files around + sessionState.close(true); + + // (re)open the session + sessionState.open(sessionState.getSessionId(), this.conf); + + console.printInfo("Session re-established."); + + dagClient = sessionState.getSession().submitDAG(dag); + } + + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_SUBMIT_DAG); + return dagClient; + } + + /* + * close will move the temp files into the right place for the fetch + * task. If the job has failed it will clean up the files. + */ + int close(TezWork work, int rc) { + try { + List ws = work.getAllWork(); + for (BaseWork w: ws) { + List> ops = w.getAllOperators(); + for (Operator op: ops) { + op.jobClose(conf, rc == 0); + } + } + } catch (Exception e) { + // jobClose needs to execute successfully otherwise fail task + if (rc == 0) { + rc = 3; + String mesg = "Job Commit failed with exception '" + + Utilities.getNameMessage(e) + "'"; + console.printError(mesg, "\n" + StringUtils.stringifyException(e)); + } + } + return rc; + } + + @Override + public boolean isMapRedTask() { + return true; + } + + @Override + public StageType getType() { + return StageType.MAPRED; + } + + @Override + public String getName() { + return "TEZ"; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/InputMerger.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/InputMerger.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/InputMerger.java (working copy) @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.exec.tez.tools; + +import java.io.IOException; +import java.util.Comparator; +import java.util.List; +import java.util.PriorityQueue; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.tez.ReduceRecordProcessor; +import org.apache.hadoop.io.BinaryComparable; +import org.apache.tez.runtime.library.api.KeyValuesReader; +import org.apache.tez.runtime.library.input.ShuffledMergedInput; + +/** + * A KeyValuesReader implementation that returns a sorted stream of key-values + * by doing a sorted merge of the key-value in ShuffledMergedInputs. + * Tags are in the last byte of the key, so no special handling for tags is required. + * Uses a priority queue to pick the KeyValuesReader of the input that is next in + * sort order. + */ +public class InputMerger implements KeyValuesReader { + + public static final Log l4j = LogFactory.getLog(ReduceRecordProcessor.class); + private PriorityQueue pQueue = null; + private KeyValuesReader nextKVReader = null; + + public InputMerger(List shuffleInputs) throws IOException { + //get KeyValuesReaders from the ShuffledMergedInput and add them to priority queue + int initialCapacity = shuffleInputs.size(); + pQueue = new PriorityQueue(initialCapacity, new KVReaderComparator()); + for(ShuffledMergedInput input : shuffleInputs){ + addToQueue(input.getReader()); + } + } + + /** + * Add KeyValuesReader to queue if it has more key-values + * @param kvsReadr + * @throws IOException + */ + private void addToQueue(KeyValuesReader kvsReadr) throws IOException{ + if(kvsReadr.next()){ + pQueue.add(kvsReadr); + } + } + + /** + * @return true if there are more key-values and advances to next key-values + * @throws IOException + */ + public boolean next() throws IOException { + //add the previous nextKVReader back to queue + if(nextKVReader != null){ + addToQueue(nextKVReader); + } + + //get the new nextKVReader with lowest key + nextKVReader = pQueue.poll(); + return nextKVReader != null; + } + + public Object getCurrentKey() throws IOException { + return nextKVReader.getCurrentKey(); + } + + public Iterable getCurrentValues() throws IOException { + return nextKVReader.getCurrentValues(); + } + + /** + * Comparator that compares KeyValuesReader on their current key + */ + class KVReaderComparator implements Comparator { + + @Override + public int compare(KeyValuesReader kvReadr1, KeyValuesReader kvReadr2) { + try { + BinaryComparable key1 = (BinaryComparable) kvReadr1.getCurrentKey(); + BinaryComparable key2 = (BinaryComparable) kvReadr2.getCurrentKey(); + return key1.compareTo(key2); + } catch (IOException e) { + l4j.error("Caught exception while reading shuffle input", e); + //die! 
+ throw new RuntimeException(e); + } + } + } + + +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java (working copy) @@ -137,7 +137,7 @@ colNames = String.format("%s %s", colNames, colName); } - LOG.info(String.format("keyObjectInspector [%s]%s => %s", + LOG.debug(String.format("keyObjectInspector [%s]%s => %s", keyObjectInspector.getClass(), keyObjectInspector, colNames)); @@ -169,7 +169,7 @@ colNames = String.format("%s %s", colNames, colName); } - LOG.info(String.format("valueObjectInspector [%s]%s => %s", + LOG.debug(String.format("valueObjectInspector [%s]%s => %s", valueObjectInspector.getClass(), valueObjectInspector, colNames)); @@ -198,7 +198,7 @@ public void processOp(Object row, int tag) throws HiveException { VectorizedRowBatch vrg = (VectorizedRowBatch) row; - LOG.info(String.format("sinking %d rows, %d values, %d keys, %d parts", + LOG.debug(String.format("sinking %d rows, %d values, %d keys, %d parts", vrg.size, valueEval.length, keyEval.length, Index: ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java (working copy) @@ -24,6 +24,7 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; +import java.util.List; import java.util.Map; import java.util.Map.Entry; @@ -41,6 +42,7 @@ import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.PartitionDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.TableScanDesc; import org.apache.hadoop.hive.serde2.ColumnProjectionUtils; import org.apache.hadoop.hive.shims.ShimLoader; @@ -252,6 +254,24 @@ pathToPartitionInfo = mrwork.getPathToPartitionInfo(); } + private void addSplitsForGroup(List dirs, TableScanOperator tableScan, JobConf conf, + InputFormat inputFormat, Class inputFormatClass, int splits, + TableDesc table, List result) throws IOException { + + Utilities.copyTableJobPropertiesToConf(table, conf); + + if (tableScan != null) { + pushFilters(conf, tableScan); + } + + FileInputFormat.setInputPaths(conf, dirs.toArray(new Path[dirs.size()])); + conf.setInputFormat(inputFormat.getClass()); + InputSplit[] iss = inputFormat.getSplits(conf, splits); + for (InputSplit is : iss) { + result.add(new HiveInputSplit(is, inputFormatClass.getName())); + } + } + public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { PerfLogger perfLogger = PerfLogger.getPerfLogger(); perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.GET_SPLITS); @@ -262,24 +282,28 @@ throw new IOException("No input paths specified in job"); } JobConf newjob = new JobConf(job); - ArrayList result = new ArrayList(); + List result = new ArrayList(); + List currentDirs = new ArrayList(); + Class currentInputFormatClass = null; + TableDesc currentTable = null; + TableScanOperator currentTableScan = null; + // for each dir, get the InputFormat, and do getSplits. 
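+    // Consecutive dirs that share the same input format, table desc and table
+    // scan operator are batched and handed to addSplitsForGroup() in one call,
+    // letting the underlying format compute splits for the whole group at once.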
for (Path dir : dirs) { PartitionDesc part = getPartitionDescFromPath(pathToPartitionInfo, dir); - // create a new InputFormat instance if this is the first time to see this - // class - Class inputFormatClass = part.getInputFileFormatClass(); - InputFormat inputFormat = getInputFormatFromCache(inputFormatClass, job); - Utilities.copyTableJobPropertiesToConf(part.getTableDesc(), newjob); + Class inputFormatClass = part.getInputFileFormatClass(); + TableDesc table = part.getTableDesc(); + TableScanOperator tableScan = null; + List aliases = + mrwork.getPathToAliases().get(dir.toUri().toString()); + // Make filter pushdown information available to getSplits. - ArrayList aliases = - mrwork.getPathToAliases().get(dir.toUri().toString()); if ((aliases != null) && (aliases.size() == 1)) { Operator op = mrwork.getAliasToWork().get(aliases.get(0)); if ((op != null) && (op instanceof TableScanOperator)) { - TableScanOperator tableScan = (TableScanOperator) op; + tableScan = (TableScanOperator) op; // push down projections. ColumnProjectionUtils.appendReadColumns( newjob, tableScan.getNeededColumnIDs(), tableScan.getNeededColumns()); @@ -288,14 +312,35 @@ } } - FileInputFormat.setInputPaths(newjob, dir); - newjob.setInputFormat(inputFormat.getClass()); - InputSplit[] iss = inputFormat.getSplits(newjob, numSplits / dirs.length); - for (InputSplit is : iss) { - result.add(new HiveInputSplit(is, inputFormatClass.getName())); + if (!currentDirs.isEmpty() && + inputFormatClass.equals(currentInputFormatClass) && + table.equals(currentTable) && + tableScan == currentTableScan) { + currentDirs.add(dir); + continue; } + + if (!currentDirs.isEmpty()) { + LOG.info("Generating splits"); + addSplitsForGroup(currentDirs, currentTableScan, newjob, + getInputFormatFromCache(currentInputFormatClass, job), + currentInputFormatClass, currentDirs.size()*(numSplits / dirs.length), + currentTable, result); + } + + currentDirs.clear(); + currentDirs.add(dir); + currentTableScan = tableScan; + currentTable = table; + currentInputFormatClass = inputFormatClass; } + LOG.info("Generating splits"); + addSplitsForGroup(currentDirs, currentTableScan, newjob, + getInputFormatFromCache(currentInputFormatClass, job), + currentInputFormatClass, currentDirs.size()*(numSplits / dirs.length), + currentTable, result); + LOG.info("number of splits " + result.size()); perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.GET_SPLITS); return result.toArray(new HiveInputSplit[result.size()]); Index: ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java (working copy) @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.io; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.session.SessionState; /** @@ -37,12 +38,19 @@ protected synchronized IOContext initialValue() { return new IOContext(); } }; + private static IOContext ioContext = new IOContext(); + public static IOContext get() { + if (SessionState.get() == null) { + // this happens on the backend. only one io context needed. 
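+      // Note: with no SessionState this is assumed to be a task-side JVM,
+      // where a single shared IOContext is sufficient; the thread-local below
+      // is only used when a client session is present.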
+ return ioContext; + } return IOContext.threadLocal.get(); } public static void clear() { IOContext.threadLocal.remove(); + ioContext = new IOContext(); } long currentBlockStart; Index: ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java (working copy) @@ -18,14 +18,15 @@ package org.apache.hadoop.hive.ql.io.orc; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.io.orc.Reader.FileMetaInfo; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import java.io.IOException; - /** * Contains factory methods to read or write ORC files. */ @@ -126,6 +127,11 @@ return new ReaderImpl(fs, path); } + public static Reader createReader(FileSystem fs, Path path, FileMetaInfo fileMetaInfo) + throws IOException { + return new ReaderImpl(fs, path, fileMetaInfo); + } + /** * Options for creating ORC file writers. */ @@ -307,4 +313,5 @@ } return memoryManager; } + } Index: ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java (working copy) @@ -27,6 +27,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -37,12 +38,18 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.exec.vector.VectorizedInputFormatInterface; import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; import org.apache.hadoop.hive.ql.io.InputFormatChecker; +import org.apache.hadoop.hive.ql.io.orc.Metadata; +import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat.FileGenerator; +import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat.SplitGenerator; +import org.apache.hadoop.hive.ql.io.orc.Reader.FileMetaInfo; import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf; import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; +import org.apache.hadoop.hive.ql.log.PerfLogger; import org.apache.hadoop.hive.ql.io.sarg.SearchArgument.TruthValue; import org.apache.hadoop.hive.ql.plan.TableScanDesc; import org.apache.hadoop.hive.serde.serdeConstants; @@ -59,6 +66,10 @@ import org.apache.hadoop.mapred.RecordReader; import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.util.StringUtils; + +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.util.concurrent.ThreadFactoryBuilder; /** * A MapReduce/Hive input format for ORC files. 
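+ * Split generation runs on a pool of daemon worker threads
+ * (ConfVars.HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS). Per-file stripe and footer
+ * metadata can be cached (ConfVars.HIVE_ORC_CACHE_STRIPE_DETAILS_SIZE) and
+ * optionally carried inside each OrcSplit
+ * (ConfVars.HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS) so record readers can
+ * avoid re-reading the file footer.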
*/ @@ -70,9 +81,13 @@ private static final Log LOG = LogFactory.getLog(OrcInputFormat.class); static final String MIN_SPLIT_SIZE = "mapred.min.split.size"; static final String MAX_SPLIT_SIZE = "mapred.max.split.size"; + private static final long DEFAULT_MIN_SPLIT_SIZE = 16 * 1024 * 1024; private static final long DEFAULT_MAX_SPLIT_SIZE = 256 * 1024 * 1024; + private static final PerfLogger perfLogger = PerfLogger.getPerfLogger(); + private static final String CLASS_NAME = ReaderImpl.class.getName(); + /** * When picking the hosts for a split that crosses block boundaries, * any drop any host that has fewer than MIN_INCLUDED_LOCATION of the @@ -169,7 +184,7 @@ String serializedPushdown = conf.get(TableScanDesc.FILTER_EXPR_CONF_STR); if (serializedPushdown == null || conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR) == null) { - LOG.info("No ORC pushdown predicate"); + LOG.debug("No ORC pushdown predicate"); return null; } SearchArgument sarg = SearchArgument.FACTORY.create @@ -181,7 +196,9 @@ public static String[] getIncludedColumnNames( List types, boolean[] includedColumns, Configuration conf) { String columnNamesString = conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR); - LOG.info("included columns names = " + columnNamesString); + if (LOG.isDebugEnabled()) { + LOG.debug("included columns names = " + columnNamesString); + } if (columnNamesString == null || conf.get(TableScanDesc.FILTER_EXPR_CONF_STR) == null) { return null; } @@ -236,13 +253,27 @@ reporter); return (RecordReader) vorr; } + FileSplit fSplit = (FileSplit)inputSplit; + reporter.setStatus(fSplit.toString()); + Path path = fSplit.getPath(); + FileSystem fs = path.getFileSystem(conf); + Reader reader = null; - FileSplit fileSplit = (FileSplit) inputSplit; - Path path = fileSplit.getPath(); - FileSystem fs = path.getFileSystem(conf); - reporter.setStatus(fileSplit.toString()); - return new OrcRecordReader(OrcFile.createReader(fs, path), conf, - fileSplit.getStart(), fileSplit.getLength()); + if(!(fSplit instanceof OrcSplit)){ + //If CombineHiveInputFormat is used, it works with FileSplit and not OrcSplit + reader = OrcFile.createReader(fs, path); + } else { + //We have OrcSplit, which may have footer metadata cached, so use the appropriate reader + //constructor + OrcSplit orcSplit = (OrcSplit) fSplit; + if (orcSplit.hasFooter()) { + FileMetaInfo fMetaInfo = orcSplit.getFileMetaInfo(); + reader = OrcFile.createReader(fs, path, fMetaInfo); + } else { + reader = OrcFile.createReader(fs, path); + } + } + return new OrcRecordReader(reader, conf, fSplit.getStart(), fSplit.getLength()); } @Override @@ -299,13 +330,19 @@ * the different worker threads. 
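+   * The context also owns the shared footer cache, the daemon thread pool
+   * used for split generation, and a fatal-error flag used to fail fast when
+   * a worker hits a non-IOException.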
*/ static class Context { - private final ExecutorService threadPool = Executors.newFixedThreadPool(10); - private final List splits = new ArrayList(10000); + private final Configuration conf; + private static Cache footerCache; + private final ExecutorService threadPool; + private final List splits = new ArrayList(10000); private final List errors = new ArrayList(); private final HadoopShims shims = ShimLoader.getHadoopShims(); - private final Configuration conf; private final long maxSize; private final long minSize; + private final boolean footerInSplits; + private final boolean cacheStripeDetails; + private final AtomicInteger cacheHitCounter = new AtomicInteger(0); + private final AtomicInteger numFilesCounter = new AtomicInteger(0); + private Throwable fatalError = null; /** * A count of the number of threads that may create more work for the @@ -317,6 +354,22 @@ this.conf = conf; minSize = conf.getLong(MIN_SPLIT_SIZE, DEFAULT_MIN_SPLIT_SIZE); maxSize = conf.getLong(MAX_SPLIT_SIZE, DEFAULT_MAX_SPLIT_SIZE); + footerInSplits = HiveConf.getBoolVar(conf, ConfVars.HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS); + int cacheStripeDetailsSize = HiveConf.getIntVar(conf, + ConfVars.HIVE_ORC_CACHE_STRIPE_DETAILS_SIZE); + int numThreads = HiveConf.getIntVar(conf, ConfVars.HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS); + + cacheStripeDetails = (cacheStripeDetailsSize > 0); + + threadPool = Executors.newFixedThreadPool(numThreads, + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("ORC_GET_SPLITS #%d").build()); + + synchronized (Context.class) { + if (footerCache == null && cacheStripeDetails) { + footerCache = CacheBuilder.newBuilder().concurrencyLevel(numThreads) + .initialCapacity(cacheStripeDetailsSize).softValues().build(); + } + } } int getSchedulers() { @@ -329,7 +382,7 @@ * the back. * @result the Nth file split */ - FileSplit getResult(int index) { + OrcSplit getResult(int index) { if (index >= 0) { return splits.get(index); } else { @@ -346,10 +399,14 @@ * @param runnable the object to run */ synchronized void schedule(Runnable runnable) { - if (runnable instanceof FileGenerator) { - schedulers += 1; + if (fatalError == null) { + if (runnable instanceof FileGenerator || runnable instanceof SplitGenerator) { + schedulers += 1; + } + threadPool.execute(runnable); + } else { + throw new RuntimeException("serious problem", fatalError); } - threadPool.execute(runnable); } /** @@ -362,6 +419,11 @@ } } + synchronized void notifyOnNonIOException(Throwable th) { + fatalError = th; + notify(); + } + /** * Wait until all of the tasks are done. 
It waits until all of the * threads that may create more work are done and then shuts down the @@ -371,6 +433,10 @@ try { while (schedulers != 0) { wait(); + if (fatalError != null) { + threadPool.shutdownNow(); + throw new RuntimeException("serious problem", fatalError); + } } threadPool.shutdown(); threadPool.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); @@ -406,18 +472,57 @@ while (itr.hasNext()) { FileStatus file = itr.next(); if (!file.isDir()) { - context.schedule(new SplitGenerator(context, fs, file)); + FileInfo fileInfo = null; + if (context.cacheStripeDetails) { + fileInfo = verifyCachedFileInfo(file); + } + SplitGenerator spgen = new SplitGenerator(context, fs, file, fileInfo); + spgen.schedule(); } } - // mark the fact that we are done - context.decrementSchedulers(); } catch (Throwable th) { - context.decrementSchedulers(); + if (!(th instanceof IOException)) { + LOG.error("Unexpected Exception", th); + } synchronized (context.errors) { context.errors.add(th); } + if (!(th instanceof IOException)) { + context.notifyOnNonIOException(th); + } + } finally { + context.decrementSchedulers(); } } + + private FileInfo verifyCachedFileInfo(FileStatus file) { + context.numFilesCounter.incrementAndGet(); + FileInfo fileInfo = Context.footerCache.getIfPresent(file.getPath()); + if (fileInfo != null) { + if (LOG.isDebugEnabled()) { + LOG.debug("Info cached for path: " + file.getPath()); + } + if (fileInfo.modificationTime == file.getModificationTime() && fileInfo.size == file.getLen()) { + // Cached copy is valid + context.cacheHitCounter.incrementAndGet(); + return fileInfo; + } else { + // Invalidate + Context.footerCache.invalidate(file.getPath()); + if (LOG.isDebugEnabled()) { + LOG.debug("Meta-Info for : " + file.getPath() + " changed. CachedModificationTime: " + + fileInfo.modificationTime + ", CurrentModificationTime: " + + file.getModificationTime() + + ", CachedLength: " + fileInfo.size + ", CurrentLength: " + file.getLen()); + } + } + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("Info not cached for path: " + file.getPath()); + } + } + return null; + } } /** @@ -430,13 +535,20 @@ private final FileStatus file; private final long blockSize; private final BlockLocation[] locations; + private final FileInfo fileInfo; + private Iterable stripes; + private FileMetaInfo fileMetaInfo; + private Metadata metadata; + private List types; + SplitGenerator(Context context, FileSystem fs, - FileStatus file) throws IOException { + FileStatus file, FileInfo fileInfo) throws IOException { this.context = context; this.fs = fs; this.file = file; this.blockSize = file.getBlockSize(); + this.fileInfo = fileInfo; locations = context.shims.getLocations(fs, file); } @@ -444,6 +556,19 @@ return file.getPath(); } + void schedule() throws IOException { + if(locations.length == 1 && file.getLen() < context.maxSize) { + String[] hosts = locations[0].getHosts(); + synchronized (context.splits) { + context.splits.add(new OrcSplit(file.getPath(), 0, file.getLen(), + hosts, fileMetaInfo)); + } + } else { + // if it requires a compute task + context.schedule(this); + } + } + @Override public String toString() { return "splitter(" + file.getPath() + ")"; @@ -475,9 +600,10 @@ * are written with large block sizes. 
* @param offset the start of the split * @param length the length of the split + * @param fileMetaInfo file metadata from footer and postscript * @throws IOException */ - void createSplit(long offset, long length) throws IOException { + void createSplit(long offset, long length, FileMetaInfo fileMetaInfo) throws IOException { String[] hosts; if ((offset % blockSize) + length <= blockSize) { // handle the single block case @@ -521,8 +647,8 @@ hostList.toArray(hosts); } synchronized (context.splits) { - context.splits.add(new FileSplit(file.getPath(), offset, length, - hosts)); + context.splits.add(new OrcSplit(file.getPath(), offset, length, + hosts, fileMetaInfo)); } } @@ -533,9 +659,8 @@ @Override public void run() { try { - Reader orcReader = OrcFile.createReader(fs, file.getPath()); + populateAndCacheStripeDetails(); Configuration conf = context.conf; - List types = orcReader.getTypes(); SearchArgument sarg = createSarg(types, conf); List stripeStats = null; int[] filterColumns = null; @@ -558,7 +683,6 @@ } } - Metadata metadata = orcReader.getMetadata(); stripeStats = metadata.getStripeStatistics(); } @@ -565,7 +689,7 @@ long currentOffset = -1; long currentLength = 0; int idx = -1; - for(StripeInformation stripe: orcReader.getStripes()) { + for(StripeInformation stripe: stripes) { idx++; // eliminate stripes that doesn't satisfy the predicate condition @@ -579,7 +703,7 @@ // create split for the previous unfinished stripe if (currentOffset != -1) { - createSplit(currentOffset, currentLength); + createSplit(currentOffset, currentLength, fileMetaInfo); currentOffset = -1; } continue; @@ -589,7 +713,7 @@ // crossed a block boundary, cut the input split here. if (currentOffset != -1 && currentLength > context.minSize && (currentOffset / blockSize != stripe.getOffset() / blockSize)) { - createSplit(currentOffset, currentLength); + createSplit(currentOffset, currentLength, fileMetaInfo); currentOffset = -1; } // if we aren't building a split, start a new one. @@ -600,20 +724,72 @@ currentLength += stripe.getLength(); } if (currentLength >= context.maxSize) { - createSplit(currentOffset, currentLength); + createSplit(currentOffset, currentLength, fileMetaInfo); currentOffset = -1; } } if (currentOffset != -1) { - createSplit(currentOffset, currentLength); + createSplit(currentOffset, currentLength, fileMetaInfo); } } catch (Throwable th) { + if (!(th instanceof IOException)) { + LOG.error("Unexpected Exception", th); + } synchronized (context.errors) { context.errors.add(th); } + if (!(th instanceof IOException)) { + context.notifyOnNonIOException(th); + } + } finally { + context.decrementSchedulers(); } } + private void populateAndCacheStripeDetails() { + try { + Reader orcReader; + boolean found = false; + if (fileInfo != null) { + found = true; + stripes = fileInfo.stripeInfos; + fileMetaInfo = fileInfo.fileMetaInfo; + metadata = fileInfo.metadata; + types = fileInfo.types; + // For multiple runs, in case sendSplitsInFooter changes + if (fileMetaInfo == null && context.footerInSplits) { + orcReader = OrcFile.createReader(fs, file.getPath()); + fileInfo.fileMetaInfo = orcReader.getFileMetaInfo(); + fileInfo.metadata = orcReader.getMetadata(); + fileInfo.types = orcReader.getTypes(); + } + } + if (!found) { + orcReader = OrcFile.createReader(fs, file.getPath()); + stripes = orcReader.getStripes(); + metadata = orcReader.getMetadata(); + types = orcReader.getTypes(); + fileMetaInfo = context.footerInSplits ? 
orcReader.getFileMetaInfo() : null; + if (context.cacheStripeDetails) { + // Populate into cache. + Context.footerCache.put(file.getPath(), + new FileInfo(file.getModificationTime(), file.getLen(), stripes, metadata, + types, fileMetaInfo)); + } + } + } catch (Throwable th) { + if (!(th instanceof IOException)) { + LOG.error("Unexpected Exception", th); + } + synchronized (context.errors) { + context.errors.add(th); + } + if (!(th instanceof IOException)) { + context.notifyOnNonIOException(th); + } + } + } + private boolean containsColumn(String[] neededColumns, String colName) { for (String col : neededColumns) { if (colName.equalsIgnoreCase(col)) { @@ -677,7 +853,6 @@ return null; } } - } @Override @@ -684,6 +859,7 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { // use threads to resolve directories into splits + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ORC_GET_SPLITS); Context context = new Context(job); for(Path dir: getInputPaths(job)) { FileSystem fs = dir.getFileSystem(job); @@ -698,7 +874,7 @@ if (th instanceof IOException) { errors.add((IOException) th); } else { - throw new IOException("serious problem", th); + throw new RuntimeException("serious problem", th); } } throw new InvalidInputException(errors); @@ -705,6 +881,37 @@ } InputSplit[] result = new InputSplit[context.splits.size()]; context.splits.toArray(result); + if (context.cacheStripeDetails) { + LOG.info("FooterCacheHitRatio: " + context.cacheHitCounter.get() + "/" + + context.numFilesCounter.get()); + } + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ORC_GET_SPLITS); return result; } + + /** + * FileInfo. + * + * Stores information relevant to split generation for an ORC File. + * + */ + private static class FileInfo { + long modificationTime; + long size; + Iterable stripeInfos; + FileMetaInfo fileMetaInfo; + Metadata metadata; + List types; + + + FileInfo(long modificationTime, long size, Iterable stripeInfos, + Metadata metadata, List types, FileMetaInfo fileMetaInfo) { + this.modificationTime = modificationTime; + this.size = size; + this.stripeInfos = stripeInfos; + this.fileMetaInfo = fileMetaInfo; + this.metadata = metadata; + this.types = types; + } + } } Index: ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java (working copy) @@ -0,0 +1,92 @@ +package org.apache.hadoop.hive.ql.io.orc; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.nio.ByteBuffer; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.io.orc.Reader.FileMetaInfo; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.mapred.FileSplit; + + + +/** + * OrcFileSplit. Holds file meta info + * + */ +public class OrcSplit extends FileSplit { + private Reader.FileMetaInfo fileMetaInfo; + private boolean hasFooter; + + protected OrcSplit(){ + //The FileSplit() constructor in hadoop 0.20 and 1.x is package private so can't use it. + //This constructor is used to create the object and then call readFields() + // so just pass nulls to this super constructor. 
+ super(null, 0, 0, (String[])null); + } + + public OrcSplit(Path path, long offset, long length, String[] hosts, + FileMetaInfo fileMetaInfo) { + super(path, offset, length, hosts); + this.fileMetaInfo = fileMetaInfo; + hasFooter = this.fileMetaInfo != null; + } + + @Override + public void write(DataOutput out) throws IOException { + //serialize path, offset, length using FileSplit + super.write(out); + + // Whether footer information follows. + out.writeBoolean(hasFooter); + + if (hasFooter) { + // serialize FileMetaInfo fields + Text.writeString(out, fileMetaInfo.compressionType); + WritableUtils.writeVInt(out, fileMetaInfo.bufferSize); + WritableUtils.writeVInt(out, fileMetaInfo.metadataSize); + + // serialize FileMetaInfo field footer + ByteBuffer footerBuff = fileMetaInfo.footerBuffer; + footerBuff.reset(); + + // write length of buffer + WritableUtils.writeVInt(out, footerBuff.limit() - footerBuff.position()); + out.write(footerBuff.array(), footerBuff.position(), + footerBuff.limit() - footerBuff.position()); + } + } + + @Override + public void readFields(DataInput in) throws IOException { + //deserialize path, offset, length using FileSplit + super.readFields(in); + + hasFooter = in.readBoolean(); + + if (hasFooter) { + // deserialize FileMetaInfo fields + String compressionType = Text.readString(in); + int bufferSize = WritableUtils.readVInt(in); + int metadataSize = WritableUtils.readVInt(in); + + // deserialize FileMetaInfo field footer + int footerBuffSize = WritableUtils.readVInt(in); + ByteBuffer footerBuff = ByteBuffer.allocate(footerBuffSize); + in.readFully(footerBuff.array(), 0, footerBuffSize); + + fileMetaInfo = new FileMetaInfo(compressionType, bufferSize, metadataSize, footerBuff); + } + } + + public FileMetaInfo getFileMetaInfo(){ + return fileMetaInfo; + } + + public boolean hasFooter() { + return hasFooter; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java (working copy) @@ -122,6 +122,30 @@ List getTypes(); /** + * FileMetaInfo - represents file metadata stored in footer and postscript sections of the file + * that is useful for Reader implementation + * + */ + class FileMetaInfo{ + final String compressionType; + final int bufferSize; + final int metadataSize; + final ByteBuffer footerBuffer; + FileMetaInfo(String compressionType, int bufferSize, int metadataSize, ByteBuffer footerBuffer){ + this.compressionType = compressionType; + this.bufferSize = bufferSize; + this.metadataSize = metadataSize; + this.footerBuffer = footerBuffer; + } + } + + /** + * Get the metadata stored in footer and postscript sections of the file + * @return MetaInfo object with file metadata + */ + FileMetaInfo getFileMetaInfo(); + + /** * Create a RecordReader that will scan the entire file. 
* @param include true for each column that should be included * @return A new RecordReader Index: ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java (working copy) @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.io.orc.OrcProto.Type; import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; +import org.apache.hadoop.hive.ql.log.PerfLogger; import org.apache.hadoop.hive.ql.util.JavaDataModel; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.io.Text; @@ -56,11 +57,18 @@ private final int bufferSize; private OrcProto.Metadata metadata = null; private final int metadataSize; - private final int footerOffset; private final OrcProto.Footer footer; private final ObjectInspector inspector; private long deserializedSize = -1; + //serialized footer - Keeping this around for use by getFileMetaInfo() + // will help avoid cpu cycles spend in deserializing at cost of increased + // memory footprint. + private final ByteBuffer footerByteBuffer; + + private static final PerfLogger perfLogger = PerfLogger.getPerfLogger(); + private static final String CLASS_NAME = ReaderImpl.class.getName(); + private static class StripeInformationImpl implements StripeInformation { private final OrcProto.StripeInformation stripe; @@ -276,10 +284,66 @@ } } + /** + * Constructor that extracts metadata information from file footer + * @param fs + * @param path + * @throws IOException + */ ReaderImpl(FileSystem fs, Path path) throws IOException { this.fileSystem = fs; this.path = path; + + FileMetaInfo footerMetaData = extractMetaInfoFromFooter(fs, path); + + MetaInfoObjExtractor rInfo = new MetaInfoObjExtractor(footerMetaData.compressionType, + footerMetaData.bufferSize, footerMetaData.metadataSize, footerMetaData.footerBuffer); + + this.footerByteBuffer = footerMetaData.footerBuffer; + this.compressionKind = rInfo.compressionKind; + this.codec = rInfo.codec; + this.bufferSize = rInfo.bufferSize; + this.metadataSize = rInfo.metadataSize; + this.metadata = rInfo.metadata; + this.footer = rInfo.footer; + this.inspector = rInfo.inspector; + } + + + /** + * Constructor that takes already saved footer meta information. 
Used for creating RecordReader + * from saved information in InputSplit + * @param fs + * @param path + * @param fMetaInfo + * @throws IOException + */ + ReaderImpl(FileSystem fs, Path path, FileMetaInfo fMetaInfo) + throws IOException { + this.fileSystem = fs; + this.path = path; + + MetaInfoObjExtractor rInfo = new MetaInfoObjExtractor( + fMetaInfo.compressionType, + fMetaInfo.bufferSize, + fMetaInfo.metadataSize, + fMetaInfo.footerBuffer + ); + this.footerByteBuffer = fMetaInfo.footerBuffer; + this.compressionKind = rInfo.compressionKind; + this.codec = rInfo.codec; + this.bufferSize = rInfo.bufferSize; + this.metadataSize = rInfo.metadataSize; + this.metadata = rInfo.metadata; + this.footer = rInfo.footer; + this.inspector = rInfo.inspector; + } + + + private static FileMetaInfo extractMetaInfoFromFooter(FileSystem fs, Path path) throws IOException { FSDataInputStream file = fs.open(path); + + //read last bytes into buffer to get PostScript long size = fs.getFileStatus(path).getLen(); int readSize = (int) Math.min(size, DIRECTORY_SIZE_GUESS); file.seek(size - readSize); @@ -286,6 +350,9 @@ ByteBuffer buffer = ByteBuffer.allocate(readSize); file.readFully(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining()); + + //read the PostScript + //get length of PostScript int psLen = buffer.get(readSize - 1) & 0xff; ensureOrcFooter(file, path, psLen, buffer); int psOffset = readSize - 1 - psLen; @@ -292,62 +359,110 @@ CodedInputStream in = CodedInputStream.newInstance(buffer.array(), buffer.arrayOffset() + psOffset, psLen); OrcProto.PostScript ps = OrcProto.PostScript.parseFrom(in); + checkOrcVersion(LOG, path, ps.getVersionList()); + int footerSize = (int) ps.getFooterLength(); - metadataSize = (int) ps.getMetadataLength(); - footerOffset = (int) (size - ( psLen + 1 + footerSize)); - bufferSize = (int) ps.getCompressionBlockSize(); + int metadataSize = (int) ps.getMetadataLength(); + + //check compression codec switch (ps.getCompression()) { case NONE: - compressionKind = CompressionKind.NONE; break; case ZLIB: - compressionKind = CompressionKind.ZLIB; break; case SNAPPY: - compressionKind = CompressionKind.SNAPPY; break; case LZO: - compressionKind = CompressionKind.LZO; break; default: throw new IllegalArgumentException("Unknown compression"); } - codec = WriterImpl.createCodec(compressionKind); - int extra = Math.max(0, psLen + 1 + footerSize - readSize); + + //check if extra bytes need to be read + int extra = Math.max(0, psLen + 1 + footerSize + metadataSize - readSize); if (extra > 0) { + //more bytes need to be read, seek back to the right place and read extra bytes file.seek(size - readSize - extra); ByteBuffer extraBuf = ByteBuffer.allocate(extra + readSize); file.readFully(extraBuf.array(), extraBuf.arrayOffset() + extraBuf.position(), extra); extraBuf.position(extra); + //append with already read bytes extraBuf.put(buffer); buffer = extraBuf; buffer.position(0); - buffer.limit(footerSize); + buffer.limit(footerSize + metadataSize); } else { - buffer.position(psOffset - footerSize); + //footer is already in the bytes in buffer, just adjust position, length + buffer.position(psOffset - footerSize - metadataSize); buffer.limit(psOffset); } - // read footer - InputStream instream = InStream.create("footer", new ByteBuffer[]{buffer}, - new long[]{0L}, footerSize, codec, bufferSize); - footer = OrcProto.Footer.parseFrom(instream); - inspector = OrcStruct.createObjectInspector(0, footer.getTypesList()); - // if metadata is already contained in first 16K file read 
then parse it - // else do it lazily - if(extra == 0) { - buffer.position(psOffset - (footerSize + metadataSize)); - buffer.limit(psOffset - footerSize); - instream = InStream.create("metadata", new ByteBuffer[]{buffer}, + // remember position for later + buffer.mark(); + + file.close(); + + return new FileMetaInfo( + ps.getCompression().toString(), + (int) ps.getCompressionBlockSize(), + (int) ps.getMetadataLength(), + buffer + ); + } + + + + /** + * MetaInfoObjExtractor - has logic to create the values for the fields in ReaderImpl + * from serialized fields. + * As the fields are final, the fields need to be initialized in the constructor and + * can't be done in some helper function. So this helper class is used instead. + * + */ + private static class MetaInfoObjExtractor{ + final CompressionKind compressionKind; + final CompressionCodec codec; + final int bufferSize; + final int metadataSize; + final OrcProto.Metadata metadata; + final OrcProto.Footer footer; + final ObjectInspector inspector; + + MetaInfoObjExtractor(String codecStr, int bufferSize, int metadataSize, + ByteBuffer footerBuffer) throws IOException { + + this.compressionKind = CompressionKind.valueOf(codecStr); + this.bufferSize = bufferSize; + this.codec = WriterImpl.createCodec(compressionKind); + this.metadataSize = metadataSize; + + int position = footerBuffer.position(); + int footerBufferSize = footerBuffer.limit() - footerBuffer.position() - metadataSize; + footerBuffer.limit(position + metadataSize); + + InputStream instream = InStream.create("metadata", new ByteBuffer[]{footerBuffer}, new long[]{0L}, metadataSize, codec, bufferSize); - metadata = OrcProto.Metadata.parseFrom(instream); + this.metadata = OrcProto.Metadata.parseFrom(instream); + + footerBuffer.position(position + metadataSize); + footerBuffer.limit(position + metadataSize + footerBufferSize); + instream = InStream.create("footer", new ByteBuffer[]{footerBuffer}, + new long[]{0L}, footerBufferSize, codec, bufferSize); + this.footer = OrcProto.Footer.parseFrom(instream); + + footerBuffer.position(position); + this.inspector = OrcStruct.createObjectInspector(0, footer.getTypesList()); } + } - file.close(); + public FileMetaInfo getFileMetaInfo(){ + return new FileMetaInfo(compressionKind.toString(), bufferSize, metadataSize, footerByteBuffer); } + + @Override public RecordReader rows(boolean[] include) throws IOException { return rows(0, Long.MAX_VALUE, include, null, null); @@ -497,20 +612,6 @@ @Override public Metadata getMetadata() throws IOException { - // if metadata is not parsed already then read and parse it - if (metadata == null && metadataSize > 0) { - FSDataInputStream file = this.fileSystem.open(path); - file.seek(footerOffset - metadataSize); - ByteBuffer buffer = ByteBuffer.allocate(metadataSize); - file.readFully(buffer.array(), buffer.arrayOffset() + buffer.position(), - buffer.remaining()); - buffer.position(0); - buffer.limit(metadataSize); - InputStream instream = InStream.create("metadata", new ByteBuffer[] {buffer}, - new long[] {0L}, metadataSize, codec, bufferSize); - metadata = OrcProto.Metadata.parseFrom(instream); - file.close(); - } return new Metadata(metadata); } Index: ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java (working copy) @@ -31,6 +31,7 @@ 
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx; import org.apache.hadoop.hive.ql.io.InputFormatChecker; +import org.apache.hadoop.hive.ql.io.orc.Reader.FileMetaInfo; import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.io.NullWritable; @@ -140,11 +141,30 @@ public RecordReader getRecordReader(InputSplit inputSplit, JobConf conf, Reporter reporter) throws IOException { - FileSplit fileSplit = (FileSplit) inputSplit; - Path path = fileSplit.getPath(); + FileSplit fSplit = (FileSplit)inputSplit; + reporter.setStatus(fSplit.toString()); + + Path path = fSplit.getPath(); FileSystem fs = path.getFileSystem(conf); - reporter.setStatus(fileSplit.toString()); - return new VectorizedOrcRecordReader(OrcFile.createReader(fs, path), conf, fileSplit); + + Reader reader = null; + + if(!(fSplit instanceof OrcSplit)){ + //If CombineHiveInputFormat is used, it works with FileSplit and not OrcSplit + reader = OrcFile.createReader(fs, path); + } else { + //We have OrcSplit, which may have footer metadata cached, so use the appropriate reader + //constructor + OrcSplit orcSplit = (OrcSplit) fSplit; + if (orcSplit.hasFooter()) { + FileMetaInfo fMetaInfo = orcSplit.getFileMetaInfo(); + reader = OrcFile.createReader(fs, path, fMetaInfo); + } else { + reader = OrcFile.createReader(fs, path); + } + } + + return new VectorizedOrcRecordReader(reader, conf, fSplit); } @Override Index: ql/src/java/org/apache/hadoop/hive/ql/lib/CompositeProcessor.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/lib/CompositeProcessor.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/lib/CompositeProcessor.java (working copy) @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.lib; + +import java.util.Stack; + +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * CompositeProcessor. Holds a list of node processors to be fired by the same + * rule. + * + */ +public class CompositeProcessor implements NodeProcessor { + + NodeProcessor[] procs; + + public CompositeProcessor(NodeProcessor...nodeProcessors) { + procs = nodeProcessors; + } + + @Override + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... 
nodeOutputs) + throws SemanticException { + for (NodeProcessor proc: procs) { + proc.process(nd, stack, procCtx, nodeOutputs); + } + return null; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java (working copy) @@ -36,9 +36,9 @@ public class DefaultGraphWalker implements GraphWalker { protected Stack opStack; - private final List toWalk = new ArrayList(); - private final HashMap retMap = new HashMap(); - private final Dispatcher dispatcher; + protected final List toWalk = new ArrayList(); + protected final HashMap retMap = new HashMap(); + protected final Dispatcher dispatcher; /** * Constructor. Index: ql/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java (working copy) @@ -53,6 +53,17 @@ public static final String FAILURE_HOOK = "FailureHook."; public static final String DRIVER_RUN = "Driver.run"; public static final String TIME_TO_SUBMIT = "TimeToSubmit"; + public static final String TEZ_SUBMIT_TO_RUNNING = "TezSubmitToRunningDag"; + public static final String TEZ_BUILD_DAG = "TezBuildDag"; + public static final String TEZ_SUBMIT_DAG = "TezSubmitDag"; + public static final String TEZ_RUN_DAG = "TezRunDag"; + public static final String TEZ_CREATE_VERTEX = "TezCreateVertex."; + public static final String TEZ_RUN_VERTEX = "TezRunVertex."; + public static final String TEZ_INITIALIZE_PROCESSOR = "TezInitializeProcessor"; + public static final String TEZ_RUN_PROCESSOR = "TezRunProcessor"; + public static final String TEZ_INIT_OPERATORS = "TezInitializeOperators"; + public static final String LOAD_HASHTABLE = "LoadHashtable"; + public static final String ORC_GET_SPLITS = "OrcGetSplits"; protected static final ThreadLocal perfLogger = new ThreadLocal(); Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (working copy) @@ -2478,7 +2478,7 @@ try { return getMSC().updateTableColumnStatistics(statsObj); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.debug(StringUtils.stringifyException(e)); throw new HiveException(e); } } @@ -2487,7 +2487,7 @@ try { return getMSC().updatePartitionColumnStatistics(statsObj); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.debug(StringUtils.stringifyException(e)); throw new HiveException(e); } } @@ -2497,7 +2497,7 @@ try { return getMSC().getTableColumnStatistics(dbName, tableName, colName); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.debug(StringUtils.stringifyException(e)); throw new HiveException(e); } @@ -2508,7 +2508,7 @@ try { return getMSC().getPartitionColumnStatistics(dbName, tableName, partName, colName); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.debug(StringUtils.stringifyException(e)); throw new HiveException(e); } } @@ -2518,7 +2518,7 @@ try { return getMSC().deleteTableColumnStatistics(dbName, tableName, colName); } catch(Exception e) { - 
LOG.error(StringUtils.stringifyException(e)); + LOG.debug(StringUtils.stringifyException(e)); throw new HiveException(e); } } @@ -2528,7 +2528,7 @@ try { return getMSC().deletePartitionColumnStatistics(dbName, tableName, partName, colName); } catch(Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.debug(StringUtils.stringifyException(e)); throw new HiveException(e); } } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java (working copy) @@ -473,7 +473,7 @@ JoinDesc joinDesc = joinOp.getConf(); JoinCondDesc[] joinCondns = joinDesc.getConds(); Set joinCandidates = MapJoinProcessor.getBigTableCandidates(joinCondns); - if (joinCandidates == null) { + if (joinCandidates.isEmpty()) { // This is a full outer join. This can never be a map-join // of any type. So return false. return false; @@ -527,6 +527,7 @@ SortBucketJoinProcCtx joinContext, ParseContext parseContext) throws SemanticException { MapJoinOperator mapJoinOp = MapJoinProcessor.convertMapJoin( + parseContext.getConf(), parseContext.getOpParseCtx(), joinOp, pGraphContext.getJoinContext().get(joinOp), Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java (working copy) @@ -0,0 +1,187 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.optimizer; + +import java.util.Set; +import java.util.Stack; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.JoinOperator; +import org.apache.hadoop.hive.ql.exec.MapJoinOperator; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; +import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.lib.NodeProcessor; +import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.parse.OptimizeTezProcContext; +import org.apache.hadoop.hive.ql.parse.ParseContext; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.ql.plan.Statistics; + +/** + * ConvertJoinMapJoin is an optimization that replaces a common join + * (aka shuffle join) with a map join (aka broadcast or fragment replicate + * join) when possible. Map joins have restrictions on which joins can be + * converted (e.g.: full outer joins cannot be handled as map joins) as well + * as memory restrictions (one side of the join has to fit into memory). + */ +public class ConvertJoinMapJoin implements NodeProcessor { + + static final private Log LOG = LogFactory.getLog(ConvertJoinMapJoin.class.getName()); + + @Override + /* + * (non-Javadoc) + * we should ideally not modify the tree we traverse. + * However, since we need to walk the tree at any time when we modify the + * operator, we might as well do it here. + */ + public Object process(Node nd, Stack stack, + NodeProcessorCtx procCtx, Object... nodeOutputs) + throws SemanticException { + + OptimizeTezProcContext context = (OptimizeTezProcContext) procCtx; + + if (!context.conf.getBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN)) { + return null; + } + + JoinOperator joinOp = (JoinOperator) nd; + + Set bigTableCandidateSet = MapJoinProcessor. + getBigTableCandidates(joinOp.getConf().getConds()); + + long maxSize = context.conf.getLongVar( + HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD); + + int bigTablePosition = -1; + + Statistics bigInputStat = null; + long totalSize = 0; + int pos = 0; + + // bigTableFound means we've encountered a table that's bigger than the + // max. This table is either the big table or we cannot convert. + boolean bigTableFound = false; + + for (Operator parentOp : joinOp.getParentOperators()) { + + Statistics currInputStat = parentOp.getStatistics(); + if (currInputStat == null) { + LOG.warn("Couldn't get statistics from: "+parentOp); + return null; + } + + long inputSize = currInputStat.getDataSize(); + if ((bigInputStat == null) || + ((bigInputStat != null) && + (inputSize > bigInputStat.getDataSize()))) { + + if (bigTableFound) { + // cannot convert to map join; we've already chosen a big table + // on size and there's another one that's bigger. + return null; + } + + if (inputSize > maxSize) { + if (!bigTableCandidateSet.contains(pos)) { + // can't use the current table as the big table, but it's too + // big for the map side. + return null; + } + + bigTableFound = true; + } + + if (bigInputStat != null) { + // we're replacing the current big table with a new one. Need + // to count the current one as a map table then.
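+          // Note: totalSize accumulates the data size of every input that
+          // will end up on the small-table (in-memory) side; it is compared
+          // against the noconditionaltask threshold (maxSize) below.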
+ totalSize += bigInputStat.getDataSize(); + } + + if (totalSize > maxSize) { + // sum of small tables size in this join exceeds configured limit + // hence cannot convert. + return null; + } + + if (bigTableCandidateSet.contains(pos)) { + bigTablePosition = pos; + bigInputStat = currInputStat; + } + } else { + totalSize += currInputStat.getDataSize(); + if (totalSize > maxSize) { + // cannot hold all map tables in memory. Cannot convert. + return null; + } + } + pos++; + } + + if (bigTablePosition == -1) { + // all tables have size 0. We let the shuffle join handle this case. + return null; + } + + /* + * Once we have decided on the map join, the tree would transform from + * + * | | + * Join MapJoin + * / \ / \ + * RS RS ---> RS TS (big table) + * / \ / + * TS TS TS (small table) + * + * for tez. + */ + + // convert to a map join operator with this information + ParseContext parseContext = context.parseContext; + MapJoinOperator mapJoinOp = MapJoinProcessor. + convertJoinOpMapJoinOp(context.conf, parseContext.getOpParseCtx(), + joinOp, parseContext.getJoinContext().get(joinOp), bigTablePosition, true); + + Operator parentBigTableOp + = mapJoinOp.getParentOperators().get(bigTablePosition); + + if (parentBigTableOp instanceof ReduceSinkOperator) { + mapJoinOp.getParentOperators().remove(bigTablePosition); + if (!(mapJoinOp.getParentOperators().contains( + parentBigTableOp.getParentOperators().get(0)))) { + mapJoinOp.getParentOperators().add(bigTablePosition, + parentBigTableOp.getParentOperators().get(0)); + } + parentBigTableOp.getParentOperators().get(0).removeChild(parentBigTableOp); + for (Operator op : mapJoinOp.getParentOperators()) { + if (!(op.getChildOperators().contains(mapJoinOp))) { + op.getChildOperators().add(mapJoinOp); + } + op.getChildOperators().remove(joinOp); + } + } + + return null; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java (working copy) @@ -19,10 +19,8 @@ package org.apache.hadoop.hive.ql.optimizer; import java.io.Serializable; -import java.net.URI; import java.util.ArrayList; import java.util.HashMap; -import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Stack; @@ -29,45 +27,19 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.ql.Context; -import org.apache.hadoop.hive.ql.exec.ColumnInfo; -import org.apache.hadoop.hive.ql.exec.ConditionalTask; -import org.apache.hadoop.hive.ql.exec.DependencyCollectionTask; import org.apache.hadoop.hive.ql.exec.FileSinkOperator; -import org.apache.hadoop.hive.ql.exec.MoveTask; import org.apache.hadoop.hive.ql.exec.Operator; -import org.apache.hadoop.hive.ql.exec.OperatorFactory; -import org.apache.hadoop.hive.ql.exec.RowSchema; import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.UnionOperator; -import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.io.RCFileInputFormat; -import org.apache.hadoop.hive.ql.io.rcfile.merge.MergeWork; import org.apache.hadoop.hive.ql.lib.Node; import 
org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles; -import org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles.ConditionalResolverMergeFilesCtx; -import org.apache.hadoop.hive.ql.plan.ConditionalWork; -import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx; import org.apache.hadoop.hive.ql.plan.FileSinkDesc; -import org.apache.hadoop.hive.ql.plan.LoadFileDesc; -import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.MapredWork; -import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; -import org.apache.hadoop.hive.ql.plan.PartitionDesc; -import org.apache.hadoop.hive.ql.plan.StatsWork; -import org.apache.hadoop.hive.ql.plan.TableDesc; -import org.apache.hadoop.hive.ql.stats.StatsFactory; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; -import org.apache.hadoop.mapred.InputFormat; /** * Processor for the rule - table scan followed by reduce sink. @@ -97,8 +69,7 @@ FileSinkOperator fsOp = (FileSinkOperator) nd; boolean isInsertTable = // is INSERT OVERWRITE TABLE - fsOp.getConf().getTableInfo().getTableName() != null && - parseCtx.getQB().getParseInfo().isInsertToTable(); + GenMapRedUtils.isInsertInto(parseCtx, fsOp); HiveConf hconf = parseCtx.getConf(); // Mark this task as a final map reduce task (ignoring the optional merge task) @@ -113,49 +84,12 @@ return true; } - // Has the user enabled merging of files for map-only jobs or for all jobs - if ((ctx.getMvTask() != null) && (!ctx.getMvTask().isEmpty())) { - List> mvTasks = ctx.getMvTask(); - - // In case of unions or map-joins, it is possible that the file has - // already been seen. - // So, no need to attempt to merge the files again. - if ((ctx.getSeenFileSinkOps() == null) - || (!ctx.getSeenFileSinkOps().contains(nd))) { - - // no need of merging if the move is to a local file system - MoveTask mvTask = (MoveTask) findMoveTask(mvTasks, fsOp); - - if (mvTask != null && isInsertTable && hconf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER)) { - addStatsTask(fsOp, mvTask, currTask, parseCtx.getConf()); - } - - if ((mvTask != null) && !mvTask.isLocal() && fsOp.getConf().canBeMerged()) { - if (fsOp.getConf().isLinkedFileSink()) { - // If the user has HIVEMERGEMAPREDFILES set to false, the idea was the - // number of reducers are few, so the number of files anyway are small. - // However, with this optimization, we are increasing the number of files - // possibly by a big margin. So, merge aggresively. - if (hconf.getBoolVar(ConfVars.HIVEMERGEMAPFILES) || - hconf.getBoolVar(ConfVars.HIVEMERGEMAPREDFILES)) { - chDir = true; - } - } else { - // There are separate configuration parameters to control whether to - // merge for a map-only job - // or for a map-reduce job - MapredWork currWork = (MapredWork) currTask.getWork(); - boolean mergeMapOnly = - hconf.getBoolVar(ConfVars.HIVEMERGEMAPFILES) && currWork.getReduceWork() == null; - boolean mergeMapRed = - hconf.getBoolVar(ConfVars.HIVEMERGEMAPREDFILES) && - currWork.getReduceWork() != null; - if (mergeMapOnly || mergeMapRed) { - chDir = true; - } - } - } - } + // In case of unions or map-joins, it is possible that the file has + // already been seen. + // So, no need to attempt to merge the files again. 
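+      // The merge decision itself (map-only vs map-reduce merge, local moves,
+      // linked file sinks) now lives in GenMapRedUtils.isMergeRequired,
+      // presumably so the same check can be shared outside this MR-only rule.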
+ if ((ctx.getSeenFileSinkOps() == null) + || (!ctx.getSeenFileSinkOps().contains(nd))) { + chDir = GenMapRedUtils.isMergeRequired(ctx.getMvTask(), hconf, fsOp, currTask, isInsertTable); } String finalName = processFS(fsOp, stack, opProcCtx, chDir); @@ -164,7 +98,9 @@ // Merge the files in the destination table/partitions by creating Map-only merge job // If underlying data is RCFile a RCFileBlockMerge task would be created. LOG.info("using CombineHiveInputformat for the merge job"); - createMRWorkForMergingFiles(fsOp, ctx, finalName); + GenMapRedUtils.createMRWorkForMergingFiles(fsOp, finalName, + ctx.getDependencyTaskForMultiInsert(), ctx.getMvTask(), + hconf, currTask); } FileSinkDesc fileSinkDesc = fsOp.getConf(); @@ -207,436 +143,6 @@ } /** - * Add the StatsTask as a dependent task of the MoveTask - * because StatsTask will change the Table/Partition metadata. For atomicity, we - * should not change it before the data is actually there done by MoveTask. - * - * @param nd - * the FileSinkOperator whose results are taken care of by the MoveTask. - * @param mvTask - * The MoveTask that moves the FileSinkOperator's results. - * @param currTask - * The MapRedTask that the FileSinkOperator belongs to. - * @param hconf - * HiveConf - */ - private void addStatsTask(FileSinkOperator nd, MoveTask mvTask, - Task currTask, HiveConf hconf) { - - MoveWork mvWork = mvTask.getWork(); - StatsWork statsWork = null; - if (mvWork.getLoadTableWork() != null) { - statsWork = new StatsWork(mvWork.getLoadTableWork()); - } else if (mvWork.getLoadFileWork() != null) { - statsWork = new StatsWork(mvWork.getLoadFileWork()); - } - assert statsWork != null : "Error when genereting StatsTask"; - - statsWork.setSourceTask(currTask); - statsWork.setStatsReliable(hconf.getBoolVar(ConfVars.HIVE_STATS_RELIABLE)); - MapredWork mrWork = (MapredWork) currTask.getWork(); - - // AggKey in StatsWork is used for stats aggregation while StatsAggPrefix - // in FileSinkDesc is used for stats publishing. They should be consistent. - statsWork.setAggKey(nd.getConf().getStatsAggPrefix()); - Task statsTask = TaskFactory.get(statsWork, hconf); - - // mark the MapredWork and FileSinkOperator for gathering stats - nd.getConf().setGatherStats(true); - mrWork.getMapWork().setGatheringStats(true); - if (mrWork.getReduceWork() != null) { - mrWork.getReduceWork().setGatheringStats(true); - } - nd.getConf().setStatsReliable(hconf.getBoolVar(ConfVars.HIVE_STATS_RELIABLE)); - nd.getConf().setMaxStatsKeyPrefixLength(StatsFactory.getMaxPrefixLength(hconf)); - // mrWork.addDestinationTable(nd.getConf().getTableInfo().getTableName()); - - // subscribe feeds from the MoveTask so that MoveTask can forward the list - // of dynamic partition list to the StatsTask - mvTask.addDependentTask(statsTask); - statsTask.subscribeFeed(mvTask); - } - - /** - * @param fsInput The FileSink operator. - * @param ctx The MR processing context. - * @param finalName the final destination path the merge job should output. - * @throws SemanticException - - * create a Map-only merge job using CombineHiveInputFormat for all partitions with - * following operators: - * MR job J0: - * ... - * | - * v - * FileSinkOperator_1 (fsInput) - * | - * v - * Merge job J1: - * | - * v - * TableScan (using CombineHiveInputFormat) (tsMerge) - * | - * v - * FileSinkOperator (fsMerge) - * - * Here the pathToPartitionInfo & pathToAlias will remain the same, which means the paths - * do - * not contain the dynamic partitions (their parent). 
So after the dynamic partitions are - * created (after the first job finished before the moveTask or ConditionalTask start), - * we need to change the pathToPartitionInfo & pathToAlias to include the dynamic - * partition - * directories. - * - */ - private void createMRWorkForMergingFiles (FileSinkOperator fsInput, GenMRProcContext ctx, - String finalName) throws SemanticException { - - // - // 1. create the operator tree - // - HiveConf conf = ctx.getParseCtx().getConf(); - FileSinkDesc fsInputDesc = fsInput.getConf(); - - // Create a TableScan operator - RowSchema inputRS = fsInput.getSchema(); - Operator tsMerge = - GenMapRedUtils.createTemporaryTableScanOperator(inputRS); - - // Create a FileSink operator - TableDesc ts = (TableDesc) fsInputDesc.getTableInfo().clone(); - FileSinkDesc fsOutputDesc = new FileSinkDesc(finalName, ts, - conf.getBoolVar(ConfVars.COMPRESSRESULT)); - FileSinkOperator fsOutput = (FileSinkOperator) OperatorFactory.getAndMakeChild( - fsOutputDesc, inputRS, tsMerge); - - // If the input FileSinkOperator is a dynamic partition enabled, the tsMerge input schema - // needs to include the partition column, and the fsOutput should have - // a DynamicPartitionCtx to indicate that it needs to dynamically partitioned. - DynamicPartitionCtx dpCtx = fsInputDesc.getDynPartCtx(); - if (dpCtx != null && dpCtx.getNumDPCols() > 0) { - // adding DP ColumnInfo to the RowSchema signature - ArrayList signature = inputRS.getSignature(); - String tblAlias = fsInputDesc.getTableInfo().getTableName(); - LinkedHashMap colMap = new LinkedHashMap(); - StringBuilder partCols = new StringBuilder(); - for (String dpCol : dpCtx.getDPColNames()) { - ColumnInfo colInfo = new ColumnInfo(dpCol, - TypeInfoFactory.stringTypeInfo, // all partition column type should be string - tblAlias, true); // partition column is virtual column - signature.add(colInfo); - colMap.put(dpCol, dpCol); // input and output have the same column name - partCols.append(dpCol).append('/'); - } - partCols.setLength(partCols.length() - 1); // remove the last '/' - inputRS.setSignature(signature); - - // create another DynamicPartitionCtx, which has a different input-to-DP column mapping - DynamicPartitionCtx dpCtx2 = new DynamicPartitionCtx(dpCtx); - dpCtx2.setInputToDPCols(colMap); - fsOutputDesc.setDynPartCtx(dpCtx2); - - // update the FileSinkOperator to include partition columns - fsInputDesc.getTableInfo().getProperties().setProperty( - org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, - partCols.toString()); // list of dynamic partition column names - } else { - // non-partitioned table - fsInputDesc.getTableInfo().getProperties().remove( - org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS); - } - - // - // 2. 
Constructing a conditional task consisting of a move task and a map reduce task - // - MoveWork dummyMv = new MoveWork(null, null, null, - new LoadFileDesc(new Path(fsInputDesc.getFinalDirName()), finalName, true, null, null), false); - MapWork cplan; - Serializable work; - - if (conf.getBoolVar(ConfVars.HIVEMERGERCFILEBLOCKLEVEL) && - fsInputDesc.getTableInfo().getInputFileFormatClass().equals(RCFileInputFormat.class)) { - - // Check if InputFormatClass is valid - String inputFormatClass = conf.getVar(ConfVars.HIVEMERGEINPUTFORMATBLOCKLEVEL); - try { - Class c = (Class) Class.forName(inputFormatClass); - - LOG.info("RCFile format- Using block level merge"); - cplan = createRCFileMergeTask(fsInputDesc, finalName, - dpCtx != null && dpCtx.getNumDPCols() > 0); - work = cplan; - } catch (ClassNotFoundException e) { - String msg = "Illegal input format class: " + inputFormatClass; - throw new SemanticException(msg); - } - - } else { - cplan = createMRWorkForMergingFiles(conf, tsMerge, fsInputDesc); - work = new MapredWork(); - ((MapredWork)work).setMapWork(cplan); - // use CombineHiveInputFormat for map-only merging - } - cplan.setInputformat("org.apache.hadoop.hive.ql.io.CombineHiveInputFormat"); - // NOTE: we should gather stats in MR1 rather than MR2 at merge job since we don't - // know if merge MR2 will be triggered at execution time - ConditionalTask cndTsk = createCondTask(conf, ctx.getCurrTask(), dummyMv, work, - fsInputDesc.getFinalDirName()); - - // keep the dynamic partition context in conditional task resolver context - ConditionalResolverMergeFilesCtx mrCtx = - (ConditionalResolverMergeFilesCtx) cndTsk.getResolverCtx(); - mrCtx.setDPCtx(fsInputDesc.getDynPartCtx()); - mrCtx.setLbCtx(fsInputDesc.getLbCtx()); - - // - // 3. add the moveTask as the children of the conditional task - // - linkMoveTask(ctx, fsOutput, cndTsk); - } - - /** - * Make the move task in the GenMRProcContext following the FileSinkOperator a dependent of all - * possible subtrees branching from the ConditionalTask. - * - * @param ctx - * @param newOutput - * @param cndTsk - */ - private void linkMoveTask(GenMRProcContext ctx, FileSinkOperator newOutput, - ConditionalTask cndTsk) { - - List> mvTasks = ctx.getMvTask(); - Task mvTask = findMoveTask(mvTasks, newOutput); - - for (Task tsk : cndTsk.getListTasks()) { - linkMoveTask(ctx, mvTask, tsk); - } - } - - /** - * Follows the task tree down from task and makes all leaves parents of mvTask - * - * @param ctx - * @param mvTask - * @param task - */ - private void linkMoveTask(GenMRProcContext ctx, Task mvTask, - Task task) { - - if (task.getDependentTasks() == null || task.getDependentTasks().isEmpty()) { - // If it's a leaf, add the move task as a child - addDependentMoveTasks(ctx, mvTask, task); - } else { - // Otherwise, for each child run this method recursively - for (Task childTask : task.getDependentTasks()) { - linkMoveTask(ctx, mvTask, childTask); - } - } - } - - /** - * Adds the dependencyTaskForMultiInsert in ctx as a dependent of parentTask. If mvTask is a - * load table, and HIVE_MULTI_INSERT_ATOMIC_OUTPUTS is set, adds mvTask as a dependent of - * dependencyTaskForMultiInsert in ctx, otherwise adds mvTask as a dependent of parentTask as - * well. 
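The helper described in the javadoc above decides where a move task hangs in the task tree. A minimal sketch of that rule, assuming mvTask, parentTask and dependencyTask are already in scope; it mirrors the branching of the relocated GenMapRedUtils.addDependentMoveTasks further down in this patch:

// Sketch only; variable names are taken from the surrounding code.
if (mvTask != null) {
  if (dependencyTask != null) {
    // every producing task first reaches the shared dependency (barrier) task
    parentTask.addDependentTask(dependencyTask);
    if (mvTask.getWork().getLoadTableWork() != null) {
      // moves that change table/partition metadata wait for the barrier
      dependencyTask.addDependentTask(mvTask);
    } else {
      // plain file moves only need their producing task to finish
      parentTask.addDependentTask(mvTask);
    }
  } else {
    parentTask.addDependentTask(mvTask);
  }
}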
- * - * @param ctx - * @param mvTask - * @param parentTask - */ - private void addDependentMoveTasks(GenMRProcContext ctx, Task mvTask, - Task parentTask) { - - if (mvTask != null) { - if (ctx.getConf().getBoolVar(ConfVars.HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES)) { - DependencyCollectionTask dependencyTask = ctx.getDependencyTaskForMultiInsert(); - parentTask.addDependentTask(dependencyTask); - if (mvTask.getWork().getLoadTableWork() != null) { - // Moving tables/partitions depend on the dependencyTask - dependencyTask.addDependentTask(mvTask); - } else { - // Moving files depends on the parentTask (we still want the dependencyTask to depend - // on the parentTask) - parentTask.addDependentTask(mvTask); - } - } else { - parentTask.addDependentTask(mvTask); - } - } - } - - /** - * Create a MapredWork based on input path, the top operator and the input - * table descriptor. - * - * @param conf - * @param topOp - * the table scan operator that is the root of the MapReduce task. - * @param fsDesc - * the file sink descriptor that serves as the input to this merge task. - * @param parentMR - * the parent MapReduce work - * @param parentFS - * the last FileSinkOperator in the parent MapReduce work - * @return the MapredWork - */ - private MapWork createMRWorkForMergingFiles (HiveConf conf, - Operator topOp, FileSinkDesc fsDesc) { - - ArrayList aliases = new ArrayList(); - String inputDir = fsDesc.getFinalDirName(); - TableDesc tblDesc = fsDesc.getTableInfo(); - aliases.add(inputDir); // dummy alias: just use the input path - - // constructing the default MapredWork - MapredWork cMrPlan = GenMapRedUtils.getMapRedWorkFromConf(conf); - MapWork cplan = cMrPlan.getMapWork(); - cplan.getPathToAliases().put(inputDir, aliases); - cplan.getPathToPartitionInfo().put(inputDir, new PartitionDesc(tblDesc, null)); - cplan.getAliasToWork().put(inputDir, topOp); - cplan.setMapperCannotSpanPartns(true); - - return cplan; - } - - /** - * Create a block level merge task for RCFiles. - * - * @param fsInputDesc - * @param finalName - * @return MergeWork if table is stored as RCFile, - * null otherwise - */ - private MapWork createRCFileMergeTask(FileSinkDesc fsInputDesc, - String finalName, boolean hasDynamicPartitions) throws SemanticException { - - String inputDir = fsInputDesc.getFinalDirName(); - TableDesc tblDesc = fsInputDesc.getTableInfo(); - - if (tblDesc.getInputFileFormatClass().equals(RCFileInputFormat.class)) { - ArrayList inputDirs = new ArrayList(); - if (!hasDynamicPartitions - && !isSkewedStoredAsDirs(fsInputDesc)) { - inputDirs.add(inputDir); - } - - MergeWork work = new MergeWork(inputDirs, finalName, - hasDynamicPartitions, fsInputDesc.getDynPartCtx()); - LinkedHashMap> pathToAliases = - new LinkedHashMap>(); - pathToAliases.put(inputDir, (ArrayList) inputDirs.clone()); - work.setMapperCannotSpanPartns(true); - work.setPathToAliases(pathToAliases); - work.setAliasToWork( - new LinkedHashMap>()); - if (hasDynamicPartitions - || isSkewedStoredAsDirs(fsInputDesc)) { - work.getPathToPartitionInfo().put(inputDir, - new PartitionDesc(tblDesc, null)); - } - work.setListBucketingCtx(fsInputDesc.getLbCtx()); - - return work; - } - - throw new SemanticException("createRCFileMergeTask called on non-RCFile table"); - } - - /** - * check if it is skewed table and stored as dirs. - * - * @param fsInputDesc - * @return - */ - private boolean isSkewedStoredAsDirs(FileSinkDesc fsInputDesc) { - return (fsInputDesc.getLbCtx() == null) ? 
false : fsInputDesc.getLbCtx() - .isSkewedStoredAsDir(); - } - - /** - * Construct a conditional task given the current leaf task, the MoveWork and the MapredWork. - * - * @param conf - * HiveConf - * @param currTask - * current leaf task - * @param mvWork - * MoveWork for the move task - * @param mergeWork - * MapredWork for the merge task. - * @param inputPath - * the input directory of the merge/move task - * @return The conditional task - */ - private ConditionalTask createCondTask(HiveConf conf, - Task currTask, MoveWork mvWork, - Serializable mergeWork, String inputPath) { - - // There are 3 options for this ConditionalTask: - // 1) Merge the partitions - // 2) Move the partitions (i.e. don't merge the partitions) - // 3) Merge some partitions and move other partitions (i.e. merge some partitions and don't - // merge others) in this case the merge is done first followed by the move to prevent - // conflicts. - Task mergeOnlyMergeTask = TaskFactory.get(mergeWork, conf); - Task moveOnlyMoveTask = TaskFactory.get(mvWork, conf); - Task mergeAndMoveMergeTask = TaskFactory.get(mergeWork, conf); - Task mergeAndMoveMoveTask = TaskFactory.get(mvWork, conf); - - // NOTE! It is necessary merge task is the parent of the move task, and not - // the other way around, for the proper execution of the execute method of - // ConditionalTask - mergeAndMoveMergeTask.addDependentTask(mergeAndMoveMoveTask); - - List listWorks = new ArrayList(); - listWorks.add(mvWork); - listWorks.add(mergeWork); - - ConditionalWork cndWork = new ConditionalWork(listWorks); - - List> listTasks = new ArrayList>(); - listTasks.add(moveOnlyMoveTask); - listTasks.add(mergeOnlyMergeTask); - listTasks.add(mergeAndMoveMergeTask); - - ConditionalTask cndTsk = (ConditionalTask) TaskFactory.get(cndWork, conf); - cndTsk.setListTasks(listTasks); - - // create resolver - cndTsk.setResolver(new ConditionalResolverMergeFiles()); - ConditionalResolverMergeFilesCtx mrCtx = - new ConditionalResolverMergeFilesCtx(listTasks, inputPath); - cndTsk.setResolverCtx(mrCtx); - - // make the conditional task as the child of the current leaf task - currTask.addDependentTask(cndTsk); - - return cndTsk; - } - - private Task findMoveTask( - List> mvTasks, FileSinkOperator fsOp) { - // find the move task - for (Task mvTsk : mvTasks) { - MoveWork mvWork = mvTsk.getWork(); - String srcDir = null; - if (mvWork.getLoadFileWork() != null) { - srcDir = mvWork.getLoadFileWork().getSourceDir(); - } else if (mvWork.getLoadTableWork() != null) { - srcDir = mvWork.getLoadTableWork().getSourceDir(); - } - - String fsOpDirName = fsOp.getConf().getFinalDirName(); - if ((srcDir != null) - && (srcDir.equalsIgnoreCase(fsOpDirName))) { - return mvTsk; - } - } - return null; - } - - /** * Process the FileSink operator to generate a MoveTask if necessary. 
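The findMoveTask helper removed above is re-added as a public static in GenMapRedUtils later in this patch; it matches a file sink to its move task by comparing source directories case-insensitively. A stand-alone illustration of just that matching rule, with made-up paths:

import java.util.Arrays;
import java.util.List;

public class FindMoveTaskSketch {
  public static void main(String[] args) {
    // hypothetical source directories taken from pending move tasks
    List<String> moveSrcDirs = Arrays.asList(
        "hdfs://nn/tmp/hive/scratch/-ext-10002",
        "hdfs://nn/tmp/hive/scratch/-ext-10004");
    // final directory name of the FileSinkOperator being processed
    String fsOpDirName = "HDFS://nn/tmp/hive/scratch/-ext-10004";

    String match = null;
    for (String srcDir : moveSrcDirs) {
      // same test as findMoveTask: non-null and equalsIgnoreCase
      if (srcDir != null && srcDir.equalsIgnoreCase(fsOpDirName)) {
        match = srcDir;
      }
    }
    System.out.println(match);   // the -ext-10004 directory
  }
}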
* * @param fsOp @@ -654,6 +160,11 @@ NodeProcessorCtx opProcCtx, boolean chDir) throws SemanticException { GenMRProcContext ctx = (GenMRProcContext) opProcCtx; + Task currTask = ctx.getCurrTask(); + + // If the directory needs to be changed, send the new directory + String dest = null; + List seenFSOps = ctx.getSeenFileSinkOps(); if (seenFSOps == null) { seenFSOps = new ArrayList(); @@ -663,49 +174,14 @@ } ctx.setSeenFileSinkOps(seenFSOps); - Task currTask = ctx.getCurrTask(); + dest = GenMapRedUtils.createMoveTask(ctx.getCurrTask(), chDir, fsOp, ctx.getParseCtx(), + ctx.getMvTask(), ctx.getConf(), ctx.getDependencyTaskForMultiInsert()); - // If the directory needs to be changed, send the new directory - String dest = null; - - if (chDir) { - dest = fsOp.getConf().getFinalDirName(); - - // generate the temporary file - // it must be on the same file system as the current destination - ParseContext parseCtx = ctx.getParseCtx(); - Context baseCtx = parseCtx.getContext(); - String tmpDir = baseCtx.getExternalTmpFileURI((new Path(dest)).toUri()); - - FileSinkDesc fileSinkDesc = fsOp.getConf(); - // Change all the linked file sink descriptors - if (fileSinkDesc.isLinkedFileSink()) { - for (FileSinkDesc fsConf:fileSinkDesc.getLinkedFileSinkDesc()) { - String fileName = Utilities.getFileNameFromDirName(fsConf.getDirName()); - fsConf.setParentDir(tmpDir); - fsConf.setDirName(tmpDir + Path.SEPARATOR + fileName); - } - } else { - fileSinkDesc.setDirName(tmpDir); - } - } - - Task mvTask = null; - - if (!chDir) { - mvTask = findMoveTask(ctx.getMvTask(), fsOp); - } - Operator currTopOp = ctx.getCurrTopOp(); String currAliasId = ctx.getCurrAliasId(); HashMap, Task> opTaskMap = ctx.getOpTaskMap(); - // Set the move task to be dependent on the current task - if (mvTask != null) { - addDependentMoveTasks(ctx, mvTask, currTask); - } - // In case of multi-table insert, the path to alias mapping is needed for // all the sources. 
Since there is no // reducer, treat it as a plan with null reducer Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java (working copy) @@ -27,6 +27,7 @@ import java.util.Set; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.exec.DependencyCollectionTask; import org.apache.hadoop.hive.ql.exec.FileSinkOperator; import org.apache.hadoop.hive.ql.exec.Operator; @@ -440,8 +441,10 @@ */ public DependencyCollectionTask getDependencyTaskForMultiInsert() { if (dependencyTaskForMultiInsert == null) { - dependencyTaskForMultiInsert = - (DependencyCollectionTask) TaskFactory.get(new DependencyCollectionWork(), conf); + if (conf.getBoolVar(ConfVars.HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES)) { + dependencyTaskForMultiInsert = + (DependencyCollectionTask) TaskFactory.get(new DependencyCollectionWork(), conf); + } } return dependencyTaskForMultiInsert; } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java (working copy) @@ -33,11 +33,15 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.exec.ColumnInfo; import org.apache.hadoop.hive.ql.exec.ConditionalTask; import org.apache.hadoop.hive.ql.exec.DemuxOperator; +import org.apache.hadoop.hive.ql.exec.DependencyCollectionTask; +import org.apache.hadoop.hive.ql.exec.FileSinkOperator; import org.apache.hadoop.hive.ql.exec.JoinOperator; +import org.apache.hadoop.hive.ql.exec.MoveTask; import org.apache.hadoop.hive.ql.exec.MapJoinOperator; import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.OperatorFactory; @@ -52,6 +56,8 @@ import org.apache.hadoop.hive.ql.exec.mr.ExecDriver; import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; import org.apache.hadoop.hive.ql.hooks.ReadEntity; +import org.apache.hadoop.hive.ql.io.RCFileInputFormat; +import org.apache.hadoop.hive.ql.io.rcfile.merge.MergeWork; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMRUnionCtx; @@ -64,20 +70,32 @@ import org.apache.hadoop.hive.ql.parse.QBJoinTree; import org.apache.hadoop.hive.ql.parse.RowResolver; import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.BaseWork; +import org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles; +import org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles.ConditionalResolverMergeFilesCtx; +import org.apache.hadoop.hive.ql.plan.ConditionalWork; +import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.FetchWork; import org.apache.hadoop.hive.ql.plan.FileSinkDesc; import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc; +import 
org.apache.hadoop.hive.ql.plan.LoadFileDesc; import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.MapredLocalWork; import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.PartitionDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; import org.apache.hadoop.hive.ql.plan.ReduceWork; +import org.apache.hadoop.hive.ql.plan.StatsWork; import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.TableScanDesc; +import org.apache.hadoop.hive.ql.plan.TezWork; +import org.apache.hadoop.hive.ql.stats.StatsFactory; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.mapred.InputFormat; /** * General utility common functions for the Processor to convert operator into @@ -90,7 +108,7 @@ LOG = LogFactory.getLog("org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils"); } - private static boolean needsTagging(ReduceWork rWork) { + public static boolean needsTagging(ReduceWork rWork) { return rWork != null && (rWork.getReducer().getClass() == JoinOperator.class || rWork.getReducer().getClass() == DemuxOperator.class); } @@ -444,10 +462,32 @@ public static void setTaskPlan(String alias_id, Operator topOp, Task task, boolean local, GenMRProcContext opProcCtx, PrunedPartitionList pList) throws SemanticException { - MapWork plan = ((MapredWork) task.getWork()).getMapWork(); - ParseContext parseCtx = opProcCtx.getParseCtx(); - Set inputs = opProcCtx.getInputs(); + setMapWork(((MapredWork) task.getWork()).getMapWork(), opProcCtx.getParseCtx(), + opProcCtx.getInputs(), pList, topOp, alias_id, opProcCtx.getConf(), local); + opProcCtx.addSeenOp(task, topOp); + } + /** + * initialize MapWork + * + * @param alias_id + * current alias + * @param topOp + * the top operator of the stack + * @param plan + * map work to initialize + * @param local + * whether you need to add to map-reduce or local work + * @param pList + * pruned partition list. If it is null it will be computed on-the-fly. + * @param inputs + * read entities for the map work + * @param conf + * current instance of hive conf + */ + public static void setMapWork(MapWork plan, ParseContext parseCtx, Set inputs, + PrunedPartitionList partsList, Operator topOp, String alias_id, + HiveConf conf, boolean local) throws SemanticException { ArrayList partDir = new ArrayList(); ArrayList partDesc = new ArrayList(); @@ -454,8 +494,6 @@ Path tblDir = null; TableDesc tblDesc = null; - PrunedPartitionList partsList = pList; - plan.setNameToSplitSample(parseCtx.getNameToSplitSample()); if (partsList == null) { @@ -701,7 +739,6 @@ } plan.setMapLocalWork(localPlan); } - opProcCtx.addSeenOp(task, topOp); } /** @@ -751,6 +788,21 @@ } /** + * Set key and value descriptor + * @param work RedueWork + * @param rs ReduceSinkOperator + */ + public static void setKeyAndValueDesc(ReduceWork work, ReduceSinkOperator rs) { + work.setKeyDesc(rs.getConf().getKeySerializeInfo()); + int tag = Math.max(0, rs.getConf().getTag()); + List tagToSchema = work.getTagToValueDesc(); + while (tag + 1 > tagToSchema.size()) { + tagToSchema.add(null); + } + tagToSchema.set(tag, rs.getConf().getValueSerializeInfo()); + } + + /** * set key and value descriptor. 
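The new ReduceWork overload of setKeyAndValueDesc added just above grows the tag-to-value-descriptor list on demand before filling the slot for the reduce sink's tag. A small stand-alone illustration of that padding, using a plain string list and a made-up tag value:

import java.util.ArrayList;
import java.util.List;

public class TagPaddingSketch {
  public static void main(String[] args) {
    List<String> tagToSchema = new ArrayList<String>();
    int tag = Math.max(0, 2);            // e.g. a reduce sink tagged 2
    while (tag + 1 > tagToSchema.size()) {
      tagToSchema.add(null);             // pad slots for lower tags not seen yet
    }
    tagToSchema.set(tag, "valueSerializeInfo-for-tag-2");
    System.out.println(tagToSchema);     // [null, null, valueSerializeInfo-for-tag-2]
  }
}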
* * @param plan @@ -766,13 +818,7 @@ if (topOp instanceof ReduceSinkOperator) { ReduceSinkOperator rs = (ReduceSinkOperator) topOp; - plan.setKeyDesc(rs.getConf().getKeySerializeInfo()); - int tag = Math.max(0, rs.getConf().getTag()); - List tagToSchema = plan.getTagToValueDesc(); - while (tag + 1 > tagToSchema.size()) { - tagToSchema.add(null); - } - tagToSchema.set(tag, rs.getConf().getValueSerializeInfo()); + setKeyAndValueDesc(plan, rs); } else { List> children = topOp.getChildOperators(); if (children != null) { @@ -1096,6 +1142,584 @@ } } + /** + * @param fsInput The FileSink operator. + * @param ctx The MR processing context. + * @param finalName the final destination path the merge job should output. + * @param dependencyTask + * @param mvTasks + * @param conf + * @param currTask + * @throws SemanticException + + * create a Map-only merge job using CombineHiveInputFormat for all partitions with + * following operators: + * MR job J0: + * ... + * | + * v + * FileSinkOperator_1 (fsInput) + * | + * v + * Merge job J1: + * | + * v + * TableScan (using CombineHiveInputFormat) (tsMerge) + * | + * v + * FileSinkOperator (fsMerge) + * + * Here the pathToPartitionInfo & pathToAlias will remain the same, which means the paths + * do + * not contain the dynamic partitions (their parent). So after the dynamic partitions are + * created (after the first job finished before the moveTask or ConditionalTask start), + * we need to change the pathToPartitionInfo & pathToAlias to include the dynamic + * partition + * directories. + * + */ + public static void createMRWorkForMergingFiles (FileSinkOperator fsInput, + String finalName, DependencyCollectionTask dependencyTask, + List> mvTasks, HiveConf conf, + Task currTask) throws SemanticException { + + // + // 1. create the operator tree + // + FileSinkDesc fsInputDesc = fsInput.getConf(); + + // Create a TableScan operator + RowSchema inputRS = fsInput.getSchema(); + Operator tsMerge = + GenMapRedUtils.createTemporaryTableScanOperator(inputRS); + + // Create a FileSink operator + TableDesc ts = (TableDesc) fsInputDesc.getTableInfo().clone(); + FileSinkDesc fsOutputDesc = new FileSinkDesc(finalName, ts, + conf.getBoolVar(ConfVars.COMPRESSRESULT)); + FileSinkOperator fsOutput = (FileSinkOperator) OperatorFactory.getAndMakeChild( + fsOutputDesc, inputRS, tsMerge); + + // If the input FileSinkOperator is a dynamic partition enabled, the tsMerge input schema + // needs to include the partition column, and the fsOutput should have + // a DynamicPartitionCtx to indicate that it needs to dynamically partitioned. 
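The dynamic-partition branch that follows appends each DP column to the merge TableScan's schema and records the column list in the table properties as a '/'-separated string. A tiny stand-alone illustration of that string building, with invented column names:

public class PartColsSketch {
  public static void main(String[] args) {
    String[] dpColNames = {"ds", "hr"};          // hypothetical DP columns
    StringBuilder partCols = new StringBuilder();
    for (String dpCol : dpColNames) {
      partCols.append(dpCol).append('/');
    }
    partCols.setLength(partCols.length() - 1);   // drop the trailing '/'
    System.out.println(partCols);                // ds/hr
  }
}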
+ DynamicPartitionCtx dpCtx = fsInputDesc.getDynPartCtx(); + if (dpCtx != null && dpCtx.getNumDPCols() > 0) { + // adding DP ColumnInfo to the RowSchema signature + ArrayList signature = inputRS.getSignature(); + String tblAlias = fsInputDesc.getTableInfo().getTableName(); + LinkedHashMap colMap = new LinkedHashMap(); + StringBuilder partCols = new StringBuilder(); + for (String dpCol : dpCtx.getDPColNames()) { + ColumnInfo colInfo = new ColumnInfo(dpCol, + TypeInfoFactory.stringTypeInfo, // all partition column type should be string + tblAlias, true); // partition column is virtual column + signature.add(colInfo); + colMap.put(dpCol, dpCol); // input and output have the same column name + partCols.append(dpCol).append('/'); + } + partCols.setLength(partCols.length() - 1); // remove the last '/' + inputRS.setSignature(signature); + + // create another DynamicPartitionCtx, which has a different input-to-DP column mapping + DynamicPartitionCtx dpCtx2 = new DynamicPartitionCtx(dpCtx); + dpCtx2.setInputToDPCols(colMap); + fsOutputDesc.setDynPartCtx(dpCtx2); + + // update the FileSinkOperator to include partition columns + fsInputDesc.getTableInfo().getProperties().setProperty( + org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, + partCols.toString()); // list of dynamic partition column names + } else { + // non-partitioned table + fsInputDesc.getTableInfo().getProperties().remove( + org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS); + } + + // + // 2. Constructing a conditional task consisting of a move task and a map reduce task + // + MoveWork dummyMv = new MoveWork(null, null, null, + new LoadFileDesc(new Path(fsInputDesc.getFinalDirName()), finalName, true, null, null), false); + MapWork cplan; + Serializable work; + + if (conf.getBoolVar(ConfVars.HIVEMERGERCFILEBLOCKLEVEL) && + fsInputDesc.getTableInfo().getInputFileFormatClass().equals(RCFileInputFormat.class)) { + + // Check if InputFormatClass is valid + String inputFormatClass = conf.getVar(ConfVars.HIVEMERGEINPUTFORMATBLOCKLEVEL); + try { + Class c = (Class) Class.forName(inputFormatClass); + + LOG.info("RCFile format- Using block level merge"); + cplan = GenMapRedUtils.createRCFileMergeTask(fsInputDesc, finalName, + dpCtx != null && dpCtx.getNumDPCols() > 0); + work = cplan; + } catch (ClassNotFoundException e) { + String msg = "Illegal input format class: " + inputFormatClass; + throw new SemanticException(msg); + } + + } else { + cplan = createMRWorkForMergingFiles(conf, tsMerge, fsInputDesc); + if (conf.getBoolVar(ConfVars.HIVE_OPTIMIZE_TEZ)) { + work = new TezWork(); + cplan.setName("Merge"); + ((TezWork)work).add(cplan); + } else { + work = new MapredWork(); + ((MapredWork)work).setMapWork(cplan); + } + } + // use CombineHiveInputFormat for map-only merging + cplan.setInputformat("org.apache.hadoop.hive.ql.io.CombineHiveInputFormat"); + // NOTE: we should gather stats in MR1 rather than MR2 at merge job since we don't + // know if merge MR2 will be triggered at execution time + ConditionalTask cndTsk = GenMapRedUtils.createCondTask(conf, currTask, dummyMv, work, + fsInputDesc.getFinalDirName()); + + // keep the dynamic partition context in conditional task resolver context + ConditionalResolverMergeFilesCtx mrCtx = + (ConditionalResolverMergeFilesCtx) cndTsk.getResolverCtx(); + mrCtx.setDPCtx(fsInputDesc.getDynPartCtx()); + mrCtx.setLbCtx(fsInputDesc.getLbCtx()); + + // + // 3. 
add the moveTask as the children of the conditional task + // + linkMoveTask(fsOutput, cndTsk, mvTasks, conf, dependencyTask); + } + + /** + * Make the move task in the GenMRProcContext following the FileSinkOperator a dependent of all + * possible subtrees branching from the ConditionalTask. + * + * @param newOutput + * @param cndTsk + * @param mvTasks + * @param hconf + * @param dependencyTask + */ + public static void linkMoveTask(FileSinkOperator newOutput, + ConditionalTask cndTsk, List> mvTasks, HiveConf hconf, + DependencyCollectionTask dependencyTask) { + + Task mvTask = GenMapRedUtils.findMoveTask(mvTasks, newOutput); + + for (Task tsk : cndTsk.getListTasks()) { + linkMoveTask(mvTask, tsk, hconf, dependencyTask); + } + } + + /** + * Follows the task tree down from task and makes all leaves parents of mvTask + * + * @param mvTask + * @param task + * @param hconf + * @param dependencyTask + */ + public static void linkMoveTask(Task mvTask, + Task task, HiveConf hconf, + DependencyCollectionTask dependencyTask) { + + if (task.getDependentTasks() == null || task.getDependentTasks().isEmpty()) { + // If it's a leaf, add the move task as a child + addDependentMoveTasks(mvTask, hconf, task, dependencyTask); + } else { + // Otherwise, for each child run this method recursively + for (Task childTask : task.getDependentTasks()) { + linkMoveTask(mvTask, childTask, hconf, dependencyTask); + } + } + } + + /** + * Adds the dependencyTaskForMultiInsert in ctx as a dependent of parentTask. If mvTask is a + * load table, and HIVE_MULTI_INSERT_ATOMIC_OUTPUTS is set, adds mvTask as a dependent of + * dependencyTaskForMultiInsert in ctx, otherwise adds mvTask as a dependent of parentTask as + * well. + * + * @param mvTask + * @param hconf + * @param parentTask + * @param dependencyTask + */ + public static void addDependentMoveTasks(Task mvTask, HiveConf hconf, + Task parentTask, DependencyCollectionTask dependencyTask) { + + if (mvTask != null) { + if (dependencyTask != null) { + parentTask.addDependentTask(dependencyTask); + if (mvTask.getWork().getLoadTableWork() != null) { + // Moving tables/partitions depend on the dependencyTask + dependencyTask.addDependentTask(mvTask); + } else { + // Moving files depends on the parentTask (we still want the dependencyTask to depend + // on the parentTask) + parentTask.addDependentTask(mvTask); + } + } else { + parentTask.addDependentTask(mvTask); + } + } + } + + + /** + * Add the StatsTask as a dependent task of the MoveTask + * because StatsTask will change the Table/Partition metadata. For atomicity, we + * should not change it before the data is actually there done by MoveTask. + * + * @param nd + * the FileSinkOperator whose results are taken care of by the MoveTask. + * @param mvTask + * The MoveTask that moves the FileSinkOperator's results. + * @param currTask + * The MapRedTask that the FileSinkOperator belongs to. 
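The linkMoveTask pair above attaches the move task only at the leaves of each branch hanging off the conditional task. A compact stand-alone model of that traversal; the Node type is invented purely for illustration and stands in for Task<? extends Serializable>:

import java.util.List;

public class LinkLeavesSketch {
  static class Node {
    List<Node> children;
    void addDependent(Node n) { /* corresponds to Task.addDependentTask */ }
  }

  // mirrors linkMoveTask: recurse until a leaf, then attach the move task there
  static void linkAtLeaves(Node mvTask, Node task) {
    if (task.children == null || task.children.isEmpty()) {
      task.addDependent(mvTask);
    } else {
      for (Node child : task.children) {
        linkAtLeaves(mvTask, child);
      }
    }
  }
}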
+ * @param hconf + * HiveConf + */ + public static void addStatsTask(FileSinkOperator nd, MoveTask mvTask, + Task currTask, HiveConf hconf) { + + MoveWork mvWork = mvTask.getWork(); + StatsWork statsWork = null; + if (mvWork.getLoadTableWork() != null) { + statsWork = new StatsWork(mvWork.getLoadTableWork()); + } else if (mvWork.getLoadFileWork() != null) { + statsWork = new StatsWork(mvWork.getLoadFileWork()); + } + assert statsWork != null : "Error when genereting StatsTask"; + + statsWork.setSourceTask(currTask); + statsWork.setStatsReliable(hconf.getBoolVar(ConfVars.HIVE_STATS_RELIABLE)); + + if (currTask.getWork() instanceof MapredWork) { + MapredWork mrWork = (MapredWork) currTask.getWork(); + mrWork.getMapWork().setGatheringStats(true); + if (mrWork.getReduceWork() != null) { + mrWork.getReduceWork().setGatheringStats(true); + } + } else { + TezWork work = (TezWork) currTask.getWork(); + for (BaseWork w: work.getAllWork()) { + w.setGatheringStats(true); + } + } + + // AggKey in StatsWork is used for stats aggregation while StatsAggPrefix + // in FileSinkDesc is used for stats publishing. They should be consistent. + statsWork.setAggKey(nd.getConf().getStatsAggPrefix()); + Task statsTask = TaskFactory.get(statsWork, hconf); + + // mark the MapredWork and FileSinkOperator for gathering stats + nd.getConf().setGatherStats(true); + nd.getConf().setStatsReliable(hconf.getBoolVar(ConfVars.HIVE_STATS_RELIABLE)); + nd.getConf().setMaxStatsKeyPrefixLength(StatsFactory.getMaxPrefixLength(hconf)); + // mrWork.addDestinationTable(nd.getConf().getTableInfo().getTableName()); + + // subscribe feeds from the MoveTask so that MoveTask can forward the list + // of dynamic partition list to the StatsTask + mvTask.addDependentTask(statsTask); + statsTask.subscribeFeed(mvTask); + } + + /** + * Returns true iff current query is an insert into for the given file sink + * + * @param parseCtx + * @param fsOp + * @return + */ + public static boolean isInsertInto(ParseContext parseCtx, FileSinkOperator fsOp) { + return fsOp.getConf().getTableInfo().getTableName() != null && + parseCtx.getQB().getParseInfo().isInsertToTable(); + } + + /** + * Create a MapredWork based on input path, the top operator and the input + * table descriptor. + * + * @param conf + * @param topOp + * the table scan operator that is the root of the MapReduce task. + * @param fsDesc + * the file sink descriptor that serves as the input to this merge task. + * @param parentMR + * the parent MapReduce work + * @param parentFS + * the last FileSinkOperator in the parent MapReduce work + * @return the MapredWork + */ + private static MapWork createMRWorkForMergingFiles (HiveConf conf, + Operator topOp, FileSinkDesc fsDesc) { + + ArrayList aliases = new ArrayList(); + String inputDir = fsDesc.getFinalDirName(); + TableDesc tblDesc = fsDesc.getTableInfo(); + aliases.add(inputDir); // dummy alias: just use the input path + + // constructing the default MapredWork + MapredWork cMrPlan = GenMapRedUtils.getMapRedWorkFromConf(conf); + MapWork cplan = cMrPlan.getMapWork(); + cplan.getPathToAliases().put(inputDir, aliases); + cplan.getPathToPartitionInfo().put(inputDir, new PartitionDesc(tblDesc, null)); + cplan.getAliasToWork().put(inputDir, topOp); + cplan.setMapperCannotSpanPartns(true); + + return cplan; + } + + /** + * Create a block level merge task for RCFiles. 
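One behavioural point in the relocated addStatsTask above: stats gathering is now flagged on the MapredWork pair for MapReduce, or on every BaseWork in the DAG for Tez. A trimmed sketch of just that branch, assuming currTask is in scope as in the patch:

// Mirrors the instanceof branch inside GenMapRedUtils.addStatsTask.
if (currTask.getWork() instanceof MapredWork) {
  MapredWork mrWork = (MapredWork) currTask.getWork();
  mrWork.getMapWork().setGatheringStats(true);
  if (mrWork.getReduceWork() != null) {
    mrWork.getReduceWork().setGatheringStats(true);
  }
} else {
  TezWork work = (TezWork) currTask.getWork();
  for (BaseWork w : work.getAllWork()) {
    w.setGatheringStats(true);   // every vertex publishes stats
  }
}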
+ * + * @param fsInputDesc + * @param finalName + * @return MergeWork if table is stored as RCFile, + * null otherwise + */ + public static MapWork createRCFileMergeTask(FileSinkDesc fsInputDesc, + String finalName, boolean hasDynamicPartitions) throws SemanticException { + + String inputDir = fsInputDesc.getFinalDirName(); + TableDesc tblDesc = fsInputDesc.getTableInfo(); + + if (tblDesc.getInputFileFormatClass().equals(RCFileInputFormat.class)) { + ArrayList inputDirs = new ArrayList(); + if (!hasDynamicPartitions + && !GenMapRedUtils.isSkewedStoredAsDirs(fsInputDesc)) { + inputDirs.add(inputDir); + } + + MergeWork work = new MergeWork(inputDirs, finalName, + hasDynamicPartitions, fsInputDesc.getDynPartCtx()); + LinkedHashMap> pathToAliases = + new LinkedHashMap>(); + pathToAliases.put(inputDir, (ArrayList) inputDirs.clone()); + work.setMapperCannotSpanPartns(true); + work.setPathToAliases(pathToAliases); + work.setAliasToWork( + new LinkedHashMap>()); + if (hasDynamicPartitions + || GenMapRedUtils.isSkewedStoredAsDirs(fsInputDesc)) { + work.getPathToPartitionInfo().put(inputDir, + new PartitionDesc(tblDesc, null)); + } + work.setListBucketingCtx(fsInputDesc.getLbCtx()); + + return work; + } + + throw new SemanticException("createRCFileMergeTask called on non-RCFile table"); + } + + /** + * Construct a conditional task given the current leaf task, the MoveWork and the MapredWork. + * + * @param conf + * HiveConf + * @param currTask + * current leaf task + * @param mvWork + * MoveWork for the move task + * @param mergeWork + * MapredWork for the merge task. + * @param inputPath + * the input directory of the merge/move task + * @return The conditional task + */ + @SuppressWarnings("unchecked") + public static ConditionalTask createCondTask(HiveConf conf, + Task currTask, MoveWork mvWork, + Serializable mergeWork, String inputPath) { + + // There are 3 options for this ConditionalTask: + // 1) Merge the partitions + // 2) Move the partitions (i.e. don't merge the partitions) + // 3) Merge some partitions and move other partitions (i.e. merge some partitions and don't + // merge others) in this case the merge is done first followed by the move to prevent + // conflicts. + Task mergeOnlyMergeTask = TaskFactory.get(mergeWork, conf); + Task moveOnlyMoveTask = TaskFactory.get(mvWork, conf); + Task mergeAndMoveMergeTask = TaskFactory.get(mergeWork, conf); + Task mergeAndMoveMoveTask = TaskFactory.get(mvWork, conf); + + // NOTE! It is necessary merge task is the parent of the move task, and not + // the other way around, for the proper execution of the execute method of + // ConditionalTask + mergeAndMoveMergeTask.addDependentTask(mergeAndMoveMoveTask); + + List listWorks = new ArrayList(); + listWorks.add(mvWork); + listWorks.add(mergeWork); + + ConditionalWork cndWork = new ConditionalWork(listWorks); + + List> listTasks = new ArrayList>(); + listTasks.add(moveOnlyMoveTask); + listTasks.add(mergeOnlyMergeTask); + listTasks.add(mergeAndMoveMergeTask); + + ConditionalTask cndTsk = (ConditionalTask) TaskFactory.get(cndWork, conf); + cndTsk.setListTasks(listTasks); + + // create resolver + cndTsk.setResolver(new ConditionalResolverMergeFiles()); + ConditionalResolverMergeFilesCtx mrCtx = + new ConditionalResolverMergeFilesCtx(listTasks, inputPath); + cndTsk.setResolverCtx(mrCtx); + + // make the conditional task as the child of the current leaf task + currTask.addDependentTask(cndTsk); + + return cndTsk; + } + + /** + * check if it is skewed table and stored as dirs. 
+ * + * @param fsInputDesc + * @return + */ + public static boolean isSkewedStoredAsDirs(FileSinkDesc fsInputDesc) { + return (fsInputDesc.getLbCtx() == null) ? false : fsInputDesc.getLbCtx() + .isSkewedStoredAsDir(); + } + + public static Task findMoveTask( + List> mvTasks, FileSinkOperator fsOp) { + // find the move task + for (Task mvTsk : mvTasks) { + MoveWork mvWork = mvTsk.getWork(); + String srcDir = null; + if (mvWork.getLoadFileWork() != null) { + srcDir = mvWork.getLoadFileWork().getSourceDir(); + } else if (mvWork.getLoadTableWork() != null) { + srcDir = mvWork.getLoadTableWork().getSourceDir(); + } + + String fsOpDirName = fsOp.getConf().getFinalDirName(); + if ((srcDir != null) + && (srcDir.equalsIgnoreCase(fsOpDirName))) { + return mvTsk; + } + } + return null; + } + + /** + * Returns true iff the fsOp requires a merge + * @param mvTasks + * @param hconf + * @param fsOp + * @param currTask + * @param isInsertTable + * @return + */ + public static boolean isMergeRequired(List> mvTasks, HiveConf hconf, FileSinkOperator fsOp, + Task currTask, boolean isInsertTable) { + + // Has the user enabled merging of files for map-only jobs or for all jobs + if ((mvTasks != null) && (!mvTasks.isEmpty())) { + + // no need of merging if the move is to a local file system + MoveTask mvTask = (MoveTask) GenMapRedUtils.findMoveTask(mvTasks, fsOp); + + if (mvTask != null && isInsertTable && hconf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER)) { + GenMapRedUtils.addStatsTask(fsOp, mvTask, currTask, hconf); + } + + if ((mvTask != null) && !mvTask.isLocal() && fsOp.getConf().canBeMerged()) { + if (fsOp.getConf().isLinkedFileSink()) { + // If the user has HIVEMERGEMAPREDFILES set to false, the idea was the + // number of reducers are few, so the number of files anyway are small. + // However, with this optimization, we are increasing the number of files + // possibly by a big margin. So, merge aggresively. 
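The rest of isMergeRequired (continued below) reduces to a small decision matrix over the two merge flags. A stand-alone rendering of that matrix, with plain booleans standing in for the HiveConf lookups; the method and parameter names here are invented for illustration:

public class MergeDecisionSketch {
  // mergeMapFiles    ~ hive.merge.mapfiles
  // mergeMapRedFiles ~ hive.merge.mapredfiles
  static boolean mergeRequired(boolean isTez, boolean hasReducer,
                               boolean mergeMapFiles, boolean mergeMapRedFiles) {
    if (isTez) {
      return mergeMapFiles || mergeMapRedFiles;   // Tez: either flag enables merging
    }
    // MapReduce: map-only and map-reduce jobs are controlled separately
    return hasReducer ? mergeMapRedFiles : mergeMapFiles;
  }

  public static void main(String[] args) {
    System.out.println(mergeRequired(false, true, true, false));  // MR with reducer: false
    System.out.println(mergeRequired(true, true, true, false));   // Tez: true
  }
}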
+ if (hconf.getBoolVar(ConfVars.HIVEMERGEMAPFILES) || + hconf.getBoolVar(ConfVars.HIVEMERGEMAPREDFILES)) { + return true; + } + } else { + // There are separate configuration parameters to control whether to + // merge for a map-only job + // or for a map-reduce job + if (currTask.getWork() instanceof TezWork) { + return hconf.getBoolVar(ConfVars.HIVEMERGEMAPFILES) || + hconf.getBoolVar(ConfVars.HIVEMERGEMAPREDFILES); + } else if (currTask.getWork() instanceof MapredWork) { + ReduceWork reduceWork = ((MapredWork) currTask.getWork()).getReduceWork(); + boolean mergeMapOnly = + hconf.getBoolVar(ConfVars.HIVEMERGEMAPFILES) && reduceWork == null; + boolean mergeMapRed = + hconf.getBoolVar(ConfVars.HIVEMERGEMAPREDFILES) && + reduceWork != null; + if (mergeMapOnly || mergeMapRed) { + return true; + } + } else { + return false; + } + } + } + } + return false; + } + + /** + * Create and add any dependent move tasks + * + * @param currTask + * @param chDir + * @param fsOp + * @param parseCtx + * @param mvTasks + * @param hconf + * @param dependencyTask + * @return + */ + public static String createMoveTask(Task currTask, boolean chDir, + FileSinkOperator fsOp, ParseContext parseCtx, List> mvTasks, + HiveConf hconf, DependencyCollectionTask dependencyTask) { + + String dest = null; + + if (chDir) { + dest = fsOp.getConf().getFinalDirName(); + + // generate the temporary file + // it must be on the same file system as the current destination + Context baseCtx = parseCtx.getContext(); + String tmpDir = baseCtx.getExternalTmpFileURI((new Path(dest)).toUri()); + + FileSinkDesc fileSinkDesc = fsOp.getConf(); + // Change all the linked file sink descriptors + if (fileSinkDesc.isLinkedFileSink()) { + for (FileSinkDesc fsConf:fileSinkDesc.getLinkedFileSinkDesc()) { + String fileName = Utilities.getFileNameFromDirName(fsConf.getDirName()); + fsConf.setParentDir(tmpDir); + fsConf.setDirName(tmpDir + Path.SEPARATOR + fileName); + } + } else { + fileSinkDesc.setDirName(tmpDir); + } + } + + Task mvTask = null; + + if (!chDir) { + mvTask = GenMapRedUtils.findMoveTask(mvTasks, fsOp); + } + + // Set the move task to be dependent on the current task + if (mvTask != null) { + GenMapRedUtils.addDependentMoveTasks(mvTask, hconf, currTask, dependencyTask); + } + + return dest; + } + private GenMapRedUtils() { // prevent instantiation } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java (working copy) @@ -286,7 +286,7 @@ currOp = currOp.getParentOperators().get(0); while (true) { - if (currOp.getParentOperators() == null) { + if ((currOp.getParentOperators() == null) || (currOp.getParentOperators().isEmpty())) { break; } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java (working copy) @@ -237,13 +237,14 @@ * @return the alias to the big table * @throws SemanticException */ - public static String genMapJoinOpAndLocalWork(MapredWork newWork, JoinOperator op, int mapJoinPos) + public static String genMapJoinOpAndLocalWork(HiveConf conf, MapredWork newWork, + JoinOperator op, int mapJoinPos) throws 
SemanticException { LinkedHashMap, OpParseContext> opParseCtxMap = newWork.getMapWork().getOpParseCtxMap(); QBJoinTree newJoinTree = newWork.getMapWork().getJoinTree(); // generate the map join operator; already checked the map join - MapJoinOperator newMapJoinOp = MapJoinProcessor.convertMapJoin(opParseCtxMap, op, + MapJoinOperator newMapJoinOp = MapJoinProcessor.convertMapJoin(conf, opParseCtxMap, op, newJoinTree, mapJoinPos, true, false); return genLocalWorkForMapJoin(newWork, newMapJoinOp, mapJoinPos); } @@ -316,7 +317,7 @@ * are cached in memory * @param noCheckOuterJoin */ - public static MapJoinOperator convertMapJoin( + public static MapJoinOperator convertMapJoin(HiveConf conf, LinkedHashMap, OpParseContext> opParseCtxMap, JoinOperator op, QBJoinTree joinTree, int mapJoinPos, boolean noCheckOuterJoin, boolean validateMapJoinTree) @@ -373,21 +374,90 @@ pos++; } + // create the map-join operator + MapJoinOperator mapJoinOp = convertJoinOpMapJoinOp(conf, opParseCtxMap, + op, joinTree, mapJoinPos, noCheckOuterJoin); + + + // remove old parents + for (pos = 0; pos < newParentOps.size(); pos++) { + newParentOps.get(pos).removeChild(oldReduceSinkParentOps.get(pos)); + newParentOps.get(pos).getChildOperators().add(mapJoinOp); + } + + + mapJoinOp.getParentOperators().removeAll(oldReduceSinkParentOps); + mapJoinOp.setParentOperators(newParentOps); + + // make sure only map-joins can be performed. + if (validateMapJoinTree) { + validateMapJoinTypes(mapJoinOp); + } + + // change the children of the original join operator to point to the map + // join operator + + return mapJoinOp; + } + + public static MapJoinOperator convertJoinOpMapJoinOp(HiveConf hconf, + LinkedHashMap, OpParseContext> opParseCtxMap, + JoinOperator op, QBJoinTree joinTree, int mapJoinPos, boolean noCheckOuterJoin) + throws SemanticException { + + JoinDesc desc = op.getConf(); + JoinCondDesc[] condns = desc.getConds(); + Byte[] tagOrder = desc.getTagOrder(); + + // outer join cannot be performed on a table which is being cached + if (!noCheckOuterJoin) { + if (checkMapJoin(mapJoinPos, condns) < 0) { + throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg()); + } + } + + Map> keyExprMap = new HashMap>(); + + // Walk over all the sources (which are guaranteed to be reduce sink + // operators). + // The join outputs a concatenation of all the inputs. 
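The refactoring above splits the old convertMapJoin into two steps: convertJoinOpMapJoinOp builds the MapJoinOperator from the join and its ReduceSink parents, and convertMapJoin then performs the tree surgery. A condensed view of the new call sequence, assuming the same variables as the surrounding code:

// 1. build the map-join operator (no tree surgery yet)
MapJoinOperator mapJoinOp = convertJoinOpMapJoinOp(conf, opParseCtxMap,
    op, joinTree, mapJoinPos, noCheckOuterJoin);

// 2. re-point the former ReduceSink grandparents at the new operator
for (int pos = 0; pos < newParentOps.size(); pos++) {
  newParentOps.get(pos).removeChild(oldReduceSinkParentOps.get(pos));
  newParentOps.get(pos).getChildOperators().add(mapJoinOp);
}
mapJoinOp.getParentOperators().removeAll(oldReduceSinkParentOps);
mapJoinOp.setParentOperators(newParentOps);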
+ QBJoinTree leftSrc = joinTree.getJoinSrc(); + List> oldReduceSinkParentOps = + new ArrayList>(); + if (leftSrc != null) { + // assert mapJoinPos == 0; + Operator parentOp = op.getParentOperators().get(0); + assert parentOp.getParentOperators().size() == 1; + oldReduceSinkParentOps.add(parentOp); + } + + + byte pos = 0; + for (String src : joinTree.getBaseSrc()) { + if (src != null) { + Operator parentOp = op.getParentOperators().get(pos); + assert parentOp.getParentOperators().size() == 1; + oldReduceSinkParentOps.add(parentOp); + } + pos++; + } + // get the join keys from old parent ReduceSink operators - for (pos = 0; pos < newParentOps.size(); pos++) { - ReduceSinkOperator oldPar = (ReduceSinkOperator) oldReduceSinkParentOps.get(pos); - ReduceSinkDesc rsconf = oldPar.getConf(); + for (pos = 0; pos < op.getParentOperators().size(); pos++) { + ReduceSinkOperator parent = (ReduceSinkOperator) oldReduceSinkParentOps.get(pos); + ReduceSinkDesc rsconf = parent.getConf(); List keys = rsconf.getKeyCols(); keyExprMap.put(pos, keys); } - // removing RS, only ExprNodeDesc is changed (key/value/filter exprs and colExprMap) - // others (output column-name, RR, schema) remain intact + List keyCols = keyExprMap.get(Byte.valueOf((byte) 0)); + StringBuilder keyOrder = new StringBuilder(); + for (int i = 0; i < keyCols.size(); i++) { + keyOrder.append("+"); + } + Map colExprMap = op.getColumnExprMap(); - List outputColumnNames = op.getConf().getOutputColumnNames(); - List schema = new ArrayList(op.getSchema().getSignature()); - Map> valueExprs = op.getConf().getExprs(); Map> newValueExprs = new HashMap>(); for (Map.Entry> entry : valueExprs.entrySet()) { @@ -411,45 +481,12 @@ } } - Map> filters = desc.getFilters(); - Map> newFilters = new HashMap>(); - for (Map.Entry> entry : filters.entrySet()) { - byte srcTag = entry.getKey(); - List filter = entry.getValue(); - - Operator terminal = oldReduceSinkParentOps.get(srcTag); - newFilters.put(srcTag, ExprNodeDescUtils.backtrack(filter, op, terminal)); - } - desc.setFilters(filters = newFilters); - - // remove old parents - for (pos = 0; pos < newParentOps.size(); pos++) { - newParentOps.get(pos).removeChild(oldReduceSinkParentOps.get(pos)); - } - - JoinCondDesc[] joinCondns = op.getConf().getConds(); - - Operator[] newPar = new Operator[newParentOps.size()]; - pos = 0; - for (Operator o : newParentOps) { - newPar[pos++] = o; - } - - List keyCols = keyExprMap.get(Byte.valueOf((byte) 0)); - StringBuilder keyOrder = new StringBuilder(); - for (int i = 0; i < keyCols.size(); i++) { - keyOrder.append("+"); - } - - TableDesc keyTableDesc = PlanUtils.getMapJoinKeyTableDesc(PlanUtils - .getFieldSchemasFromColumnList(keyCols, MAPJOINKEY_FIELDPREFIX)); - + // construct valueTableDescs and valueFilteredTableDescs List valueTableDescs = new ArrayList(); List valueFiltedTableDescs = new ArrayList(); - int[][] filterMap = desc.getFilterMap(); - for (pos = 0; pos < newParentOps.size(); pos++) { - List valueCols = newValueExprs.get(pos); + for (pos = 0; pos < op.getParentOperators().size(); pos++) { + List valueCols = newValueExprs.get(Byte.valueOf((byte) pos)); int length = valueCols.size(); List valueFilteredCols = new ArrayList(length); // deep copy expr node desc @@ -476,6 +513,19 @@ valueTableDescs.add(valueTableDesc); valueFiltedTableDescs.add(valueFilteredTableDesc); } + + Map> filters = desc.getFilters(); + Map> newFilters = new HashMap>(); + for (Map.Entry> entry : filters.entrySet()) { + byte srcTag = entry.getKey(); + List filter = entry.getValue(); + + Operator 
terminal = op.getParentOperators().get(srcTag); + newFilters.put(srcTag, ExprNodeDescUtils.backtrack(filter, op, terminal)); + } + desc.setFilters(filters = newFilters); + + // create dumpfile prefix needed to create descriptor String dumpFilePrefix = ""; if( joinTree.getMapAliases() != null ) { for(String mapAlias : joinTree.getMapAliases()) { @@ -485,15 +535,24 @@ } else { dumpFilePrefix = "mapfile"+PlanUtils.getCountForMapJoinDumpFilePrefix(); } + + List outputColumnNames = op.getConf().getOutputColumnNames(); + TableDesc keyTableDesc = PlanUtils.getMapJoinKeyTableDesc(hconf, + PlanUtils.getFieldSchemasFromColumnList(keyCols, MAPJOINKEY_FIELDPREFIX)); + JoinCondDesc[] joinCondns = op.getConf().getConds(); MapJoinDesc mapJoinDescriptor = new MapJoinDesc(keyExprMap, keyTableDesc, newValueExprs, valueTableDescs, valueFiltedTableDescs, outputColumnNames, mapJoinPos, joinCondns, filters, op.getConf().getNoOuterJoin(), dumpFilePrefix); + mapJoinDescriptor.setStatistics(op.getConf().getStatistics()); mapJoinDescriptor.setTagOrder(tagOrder); mapJoinDescriptor.setNullSafes(desc.getNullSafes()); mapJoinDescriptor.setFilterMap(desc.getFilterMap()); + // reduce sink row resolver used to generate map join op + RowResolver outputRS = opParseCtxMap.get(op).getRowResolver(); + MapJoinOperator mapJoinOp = (MapJoinOperator) OperatorFactory.getAndMakeChild( - mapJoinDescriptor, new RowSchema(outputRS.getColumnInfos()), newPar); + mapJoinDescriptor, new RowSchema(outputRS.getColumnInfos()), op.getParentOperators()); OpParseContext ctx = new OpParseContext(outputRS); opParseCtxMap.put(mapJoinOp, ctx); @@ -501,8 +560,6 @@ mapJoinOp.getConf().setReversedExprs(op.getConf().getReversedExprs()); mapJoinOp.setColumnExprMap(colExprMap); - // change the children of the original join operator to point to the map - // join operator List> childOps = op.getChildOperators(); for (Operator childOp : childOps) { childOp.replaceParent(op, mapJoinOp); @@ -509,16 +566,11 @@ } mapJoinOp.setChildOperators(childOps); - mapJoinOp.setParentOperators(newParentOps); op.setChildOperators(null); op.setParentOperators(null); - // make sure only map-joins can be performed. 
- if (validateMapJoinTree) { - validateMapJoinTypes(mapJoinOp); - } + return mapJoinOp; - return mapJoinOp; } /** @@ -534,7 +586,7 @@ * are cached in memory * @param noCheckOuterJoin */ - public static MapJoinOperator convertSMBJoinToMapJoin( + public static MapJoinOperator convertSMBJoinToMapJoin(HiveConf hconf, Map, OpParseContext> opParseCtxMap, SMBMapJoinOperator smbJoinOp, QBJoinTree joinTree, int bigTablePos, boolean noCheckOuterJoin) throws SemanticException { @@ -541,7 +593,7 @@ // Create a new map join operator SMBJoinDesc smbJoinDesc = smbJoinOp.getConf(); List keyCols = smbJoinDesc.getKeys().get(Byte.valueOf((byte) 0)); - TableDesc keyTableDesc = PlanUtils.getMapJoinKeyTableDesc(PlanUtils + TableDesc keyTableDesc = PlanUtils.getMapJoinKeyTableDesc(hconf, PlanUtils .getFieldSchemasFromColumnList(keyCols, MAPJOINKEY_FIELDPREFIX)); MapJoinDesc mapJoinDesc = new MapJoinDesc(smbJoinDesc.getKeys(), keyTableDesc, smbJoinDesc.getExprs(), @@ -550,6 +602,8 @@ bigTablePos, smbJoinDesc.getConds(), smbJoinDesc.getFilters(), smbJoinDesc.isNoOuterJoin(), smbJoinDesc.getDumpFilePrefix()); + mapJoinDesc.setStatistics(smbJoinDesc.getStatistics()); + RowResolver joinRS = opParseCtxMap.get(smbJoinOp).getRowResolver(); // The mapjoin has the same schema as the join operator MapJoinOperator mapJoinOp = (MapJoinOperator) OperatorFactory.getAndMakeChild( @@ -589,8 +643,8 @@ LinkedHashMap, OpParseContext> opParseCtxMap = pctx .getOpParseCtx(); - MapJoinOperator mapJoinOp = convertMapJoin(opParseCtxMap, op, joinTree, mapJoinPos, - noCheckOuterJoin, true); + MapJoinOperator mapJoinOp = convertMapJoin(pctx.getConf(), opParseCtxMap, op, + joinTree, mapJoinPos, noCheckOuterJoin, true); // create a dummy select to select all columns genSelectPlan(pctx, mapJoinOp); return mapJoinOp; @@ -610,7 +664,7 @@ * If see a right outer join, set lastSeenRightOuterJoin to true, clear the * bigTableCandidates, and add right side to the bigTableCandidates, it means * the right side of a right outer join always win. If see a full outer join, - * return null immediately (no one can be the big table, can not do a + * return empty set immediately (no one can be the big table, can not do a * mapjoin). * * @@ -636,7 +690,8 @@ // changed in future, these 2 are not missing. 
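Because getBigTableCandidates now returns an empty set instead of null when a full outer join is seen, callers test isEmpty() rather than checking for null (the CorrelationOptimizer hunk below makes exactly this change). A minimal caller-side sketch, assuming joinDesc is in scope inside the caller's loop over join operators:

Set bigTableCandidates =
    MapJoinProcessor.getBigTableCandidates(joinDesc.getConds());
if (bigTableCandidates.isEmpty()) {
  // full outer join or no viable big-table side: cannot convert to a map join
  continue;
}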
seenOuterJoin = true; lastSeenRightOuterJoin = false; - return null; + // empty set - cannot convert + return new HashSet(); } else if (joinType == JoinDesc.LEFT_OUTER_JOIN || joinType == JoinDesc.LEFT_SEMI_JOIN) { seenOuterJoin = true; Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java (working copy) @@ -115,7 +115,8 @@ if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES)) { transformations.add(new StatsOptimizer()); } - if (pctx.getContext().getExplain()) { + if (pctx.getContext().getExplain() || + HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPTIMIZE_TEZ)) { transformations.add(new AnnotateWithStatistics()); } transformations.add(new SimpleFetchOptimizer()); // must be called last Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java (working copy) @@ -0,0 +1,175 @@ +package org.apache.hadoop.hive.ql.optimizer; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Stack; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.FileSinkOperator; +import org.apache.hadoop.hive.ql.exec.HashTableDummyOperator; +import org.apache.hadoop.hive.ql.exec.MapJoinOperator; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.OperatorFactory; +import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; +import org.apache.hadoop.hive.ql.exec.RowSchema; +import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.lib.NodeProcessor; +import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.parse.GenTezProcContext; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.BaseWork; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.HashTableDummyDesc; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.ql.plan.PlanUtils; +import org.apache.hadoop.hive.ql.plan.TableDesc; +import org.apache.hadoop.hive.ql.plan.TezWork; +import org.apache.hadoop.hive.ql.plan.TezWork.EdgeType; + +public class ReduceSinkMapJoinProc implements NodeProcessor { + + protected transient Log LOG = LogFactory.getLog(this.getClass().getName()); + + /* (non-Javadoc) + * This processor addresses the RS-MJ case that occurs in tez on the small/hash + * table side of things. The connection between the work that RS will be a part of + * must be connected to the MJ work via be a broadcast edge. + * We should not walk down the tree when we encounter this pattern because: + * the type of work (map work or reduce work) needs to be determined + * on the basis of the big table side because it may be a mapwork (no need for shuffle) + * or reduce work. + */ + @Override + public Object process(Node nd, Stack stack, NodeProcessorCtx procContext, Object... 
nodeOutputs) + throws SemanticException { + GenTezProcContext context = (GenTezProcContext) procContext; + context.preceedingWork = null; + context.currentRootOperator = null; + + MapJoinOperator mapJoinOp = (MapJoinOperator)nd; + Operator childOp = mapJoinOp.getChildOperators().get(0); + + ReduceSinkOperator parentRS = (ReduceSinkOperator)stack.get(stack.size() - 2); + + // remember the original parent list before we start modifying it. + if (!context.mapJoinParentMap.containsKey(mapJoinOp)) { + List> parents = new ArrayList(mapJoinOp.getParentOperators()); + context.mapJoinParentMap.put(mapJoinOp, parents); + } + + BaseWork myWork = null; + + while (childOp != null) { + if ((childOp instanceof ReduceSinkOperator) || (childOp instanceof FileSinkOperator)) { + /* + * if there was a pre-existing work generated for the big-table mapjoin side, + * we need to hook the work generated for the RS (associated with the RS-MJ pattern) + * with the pre-existing work. + * + * Otherwise, we need to associate that the reduce sink/file sink down the MJ path + * to be linked to the RS work (associated with the RS-MJ pattern). + * + */ + + myWork = context.operatorWorkMap.get(childOp); + BaseWork parentWork = context.operatorWorkMap.get(parentRS); + + // set the link between mapjoin and parent vertex + int pos = context.mapJoinParentMap.get(mapJoinOp).indexOf(parentRS); + if (pos == -1) { + throw new SemanticException("Cannot find position of parent in mapjoin"); + } + LOG.debug("Mapjoin "+mapJoinOp+", pos: "+pos+" --> "+parentWork.getName()); + mapJoinOp.getConf().getParentToInput().put(pos, parentWork.getName()); + + if (myWork != null) { + // link the work with the work associated with the reduce sink that triggered this rule + TezWork tezWork = context.currentTask.getWork(); + tezWork.connect(parentWork, myWork, EdgeType.BROADCAST_EDGE); + + // remember the output name of the reduce sink + parentRS.getConf().setOutputName(myWork.getName()); + + } else { + List linkWorkList = context.linkOpWithWorkMap.get(childOp); + if (linkWorkList == null) { + linkWorkList = new ArrayList(); + } + linkWorkList.add(parentWork); + context.linkOpWithWorkMap.put(childOp, linkWorkList); + + List reduceSinks + = context.linkWorkWithReduceSinkMap.get(parentWork); + if (reduceSinks == null) { + reduceSinks = new ArrayList(); + } + reduceSinks.add(parentRS); + context.linkWorkWithReduceSinkMap.put(parentWork, reduceSinks); + } + + break; + } + + if ((childOp.getChildOperators() != null) && (childOp.getChildOperators().size() >= 1)) { + childOp = childOp.getChildOperators().get(0); + } else { + break; + } + } + + // create the dummy operators + List> dummyOperators = + new ArrayList>(); + + // create an new operator: HashTableDummyOperator, which share the table desc + HashTableDummyDesc desc = new HashTableDummyDesc(); + @SuppressWarnings("unchecked") + HashTableDummyOperator dummyOp = (HashTableDummyOperator) OperatorFactory.get(desc); + TableDesc tbl; + + // need to create the correct table descriptor for key/value + RowSchema rowSchema = parentRS.getParentOperators().get(0).getSchema(); + tbl = PlanUtils.getReduceValueTableDesc(PlanUtils.getFieldSchemasFromRowSchema(rowSchema, "")); + dummyOp.getConf().setTbl(tbl); + + Map> keyExprMap = mapJoinOp.getConf().getKeys(); + List keyCols = keyExprMap.get(Byte.valueOf((byte) 0)); + StringBuffer keyOrder = new StringBuffer(); + for (ExprNodeDesc k: keyCols) { + keyOrder.append("+"); + } + TableDesc keyTableDesc = PlanUtils.getReduceKeyTableDesc(PlanUtils + 
.getFieldSchemasFromColumnList(keyCols, "mapjoinkey"), keyOrder.toString()); + mapJoinOp.getConf().setKeyTableDesc(keyTableDesc); + + // let the dummy op be the parent of mapjoin op + mapJoinOp.replaceParent(parentRS, dummyOp); + List> dummyChildren = + new ArrayList>(); + dummyChildren.add(mapJoinOp); + dummyOp.setChildOperators(dummyChildren); + dummyOperators.add(dummyOp); + + // cut the operator tree so as to not retain connections from the parent RS downstream + List> childOperators = parentRS.getChildOperators(); + int childIndex = childOperators.indexOf(mapJoinOp); + childOperators.remove(childIndex); + + // the "work" needs to know about the dummy operators. They have to be separately initialized + // at task startup + if (myWork != null) { + myWork.addDummyOp(dummyOp); + } else { + List> dummyList = dummyOperators; + if (context.linkChildOpWithDummyOp.containsKey(childOp)) { + dummyList = context.linkChildOpWithDummyOp.get(childOp); + } + dummyList.add(dummyOp); + context.linkChildOpWithDummyOp.put(childOp, dummyList); + } + return true; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java (working copy) @@ -0,0 +1,97 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.optimizer; + +import java.util.Stack; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.lib.NodeProcessor; +import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.parse.OptimizeTezProcContext; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; + +/** + * SetReducerParallelism determines how many reducers should + * be run for a given reduce sink. + */ +public class SetReducerParallelism implements NodeProcessor { + + static final private Log LOG = LogFactory.getLog(SetReducerParallelism.class.getName()); + + @SuppressWarnings("unchecked") + @Override + public Object process(Node nd, Stack stack, + NodeProcessorCtx procContext, Object... 
nodeOutputs) + throws SemanticException { + + OptimizeTezProcContext context = (OptimizeTezProcContext) procContext; + + ReduceSinkOperator sink = (ReduceSinkOperator) nd; + ReduceSinkDesc desc = sink.getConf(); + + long bytesPerReducer = context.conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER); + int maxReducers = context.conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS); + int constantReducers = context.conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS); + + if (context.visitedReduceSinks.contains(sink)) { + // skip walking the children + LOG.debug("Already processed reduce sink: " + sink.getName()); + return true; + } + + context.visitedReduceSinks.add(sink); + + if (desc.getNumReducers() <= 0) { + if (constantReducers > 0) { + LOG.info("Parallelism for reduce sink "+sink+" set by user to "+constantReducers); + desc.setNumReducers(constantReducers); + } else { + long numberOfBytes = 0; + + // we need to add up all the estimates from the siblings of this reduce sink + for (Operator sibling: + sink.getChildOperators().get(0).getParentOperators()) { + if (sibling.getStatistics() != null) { + numberOfBytes += sibling.getStatistics().getDataSize(); + } else { + LOG.warn("No stats available from: "+sibling); + } + } + + int numReducers = Utilities.estimateReducers(numberOfBytes, bytesPerReducer, + maxReducers, false); + LOG.info("Set parallelism for reduce sink "+sink+" to: "+numReducers); + desc.setNumReducers(numReducers); + } + } else { + LOG.info("Number of reducers determined to be: "+desc.getNumReducers()); + } + + return false; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/SkewJoinOptimizer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SkewJoinOptimizer.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SkewJoinOptimizer.java (working copy) @@ -398,7 +398,8 @@ return parseContext.getTopToTable().get(tsOp); } } - if ((op.getParentOperators() == null) || (op.getParentOperators().size() > 1)) { + if ((op.getParentOperators() == null) || (op.getParentOperators().isEmpty()) || + (op.getParentOperators().size() > 1)) { return null; } op = op.getParentOperators().get(0); Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java (working copy) @@ -168,7 +168,7 @@ int numAliases = order.length; Set bigTableCandidates = MapJoinProcessor.getBigTableCandidates(joinDesc.getConds()); - if (bigTableCandidates == null) { + if (bigTableCandidates.isEmpty()) { continue; } @@ -346,7 +346,7 @@ "involved in this operator"); return correlatedReduceSinkOperators; } - if (current.getParentOperators() == null) { + if ((current.getParentOperators() == null) || (current.getParentOperators().isEmpty())) { return correlatedReduceSinkOperators; } if (current instanceof PTFOperator) { Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java (working copy) @@ -189,7 +189,8 @@ // optimize this newWork 
given the big table position String bigTableAlias = - MapJoinProcessor.genMapJoinOpAndLocalWork(newWork, newJoinOp, bigTablePosition); + MapJoinProcessor.genMapJoinOpAndLocalWork(physicalContext.getParseContext().getConf(), + newWork, newJoinOp, bigTablePosition); return new ObjectPair(newTask, bigTableAlias); } @@ -434,7 +435,7 @@ .getConds()); // no table could be the big table; there is no need to convert - if (bigTableCandidates == null) { + if (bigTableCandidates.isEmpty()) { return null; } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java (working copy) @@ -204,7 +204,7 @@ Operator currOp = originalSMBJoinOp; while (true) { - if (currOp.getChildOperators() == null) { + if ((currOp.getChildOperators() == null) || (currOp.getChildOperators().isEmpty())) { if (currOp instanceof FileSinkOperator) { FileSinkOperator fsOp = (FileSinkOperator)currOp; // The query has enforced that a sort-merge join should be performed. @@ -432,7 +432,8 @@ opParseContextMap.put(newSMBJoinOp, opParseContextMap.get(oldSMBJoinOp)); // generate the map join operator - return MapJoinProcessor.convertSMBJoinToMapJoin(opParseContextMap, newSMBJoinOp, + return MapJoinProcessor.convertSMBJoinToMapJoin(physicalContext.getConf(), + opParseContextMap, newSMBJoinOp, joinTree, mapJoinPos, true); } } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java (working copy) @@ -48,6 +48,7 @@ import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.UDF; import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; +import org.apache.hadoop.hive.ql.exec.tez.TezTask; import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; import org.apache.hadoop.hive.ql.exec.vector.VectorizationContextRegion; @@ -69,6 +70,7 @@ import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.AbstractOperatorDesc; import org.apache.hadoop.hive.ql.plan.AggregationDesc; +import org.apache.hadoop.hive.ql.plan.BaseWork; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; @@ -77,6 +79,7 @@ import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.PartitionDesc; import org.apache.hadoop.hive.ql.plan.TableScanDesc; +import org.apache.hadoop.hive.ql.plan.TezWork; import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.ql.udf.UDFAcos; import org.apache.hadoop.hive.ql.udf.UDFAsin; @@ -289,17 +292,27 @@ throws SemanticException { Task currTask = (Task) nd; if (currTask instanceof MapRedTask) { - boolean ret = validateMRTask((MapRedTask) currTask); - if (ret) { - vectorizeMRTask((MapRedTask) currTask); + convertMapWork(((MapRedTask) currTask).getWork().getMapWork()); + } else if (currTask instanceof TezTask) { + TezWork work = ((TezTask) currTask).getWork(); + for (BaseWork 
w: work.getAllWork()) { + if (w instanceof MapWork) { + convertMapWork((MapWork)w); + } } } return null; } - private boolean validateMRTask(MapRedTask mrTask) throws SemanticException { - MapWork mapWork = mrTask.getWork().getMapWork(); + private void convertMapWork(MapWork mapWork) throws SemanticException { + boolean ret = validateMapWork(mapWork); + if (ret) { + vectorizeMapWork(mapWork); + } + } + private boolean validateMapWork(MapWork mapWork) throws SemanticException { + // Validate the input format for (String path : mapWork.getPathToPartitionInfo().keySet()) { PartitionDesc pd = mapWork.getPathToPartitionInfo().get(path); @@ -334,12 +347,11 @@ return true; } - private void vectorizeMRTask(MapRedTask mrTask) throws SemanticException { + private void vectorizeMapWork(MapWork mapWork) throws SemanticException { LOG.info("Vectorizing task..."); - MapWork mapWork = mrTask.getWork().getMapWork(); mapWork.setVectorMode(true); Map opRules = new LinkedHashMap(); - VectorizationNodeProcessor vnp = new VectorizationNodeProcessor(mrTask); + VectorizationNodeProcessor vnp = new VectorizationNodeProcessor(mapWork); opRules.put(new RuleRegExp("R1", TableScanOperator.getOperatorName() + ".*" + ReduceSinkOperator.getOperatorName()), vnp); opRules.put(new RuleRegExp("R2", TableScanOperator.getOperatorName() + ".*" @@ -399,8 +411,8 @@ private final Set> opsDone = new HashSet>(); - public VectorizationNodeProcessor(MapRedTask mrTask) { - this.mWork = mrTask.getWork().getMapWork(); + public VectorizationNodeProcessor(MapWork mWork) { + this.mWork = mWork; } public Map> getScratchColumnVectorTypes() { Index: ql/src/java/org/apache/hadoop/hive/ql/parse/FileSinkProcessor.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/FileSinkProcessor.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/FileSinkProcessor.java (working copy) @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.parse; + +import java.util.Stack; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.FileSinkOperator; +import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.lib.NodeProcessor; +import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils; + +/** + * FileSinkProcessor handles addition of merge, move and stats tasks for filesinks + * + */ +public class FileSinkProcessor implements NodeProcessor { + + static final private Log LOG = LogFactory.getLog(FileSinkProcessor.class.getName()); + + @Override + /* + * (non-Javadoc) + * we should ideally not modify the tree we traverse. + * However, since we need to walk the tree at any time when we modify the + * operator, we might as well do it here. + */ + public Object process(Node nd, Stack stack, + NodeProcessorCtx procCtx, Object... nodeOutputs) + throws SemanticException { + + GenTezProcContext context = (GenTezProcContext) procCtx; + FileSinkOperator fileSink = (FileSinkOperator) nd; + ParseContext parseContext = context.parseContext; + + + boolean isInsertTable = // is INSERT OVERWRITE TABLE + GenMapRedUtils.isInsertInto(parseContext, fileSink); + HiveConf hconf = parseContext.getConf(); + + boolean chDir = GenMapRedUtils.isMergeRequired(context.moveTask, + hconf, fileSink, context.currentTask, isInsertTable); + + String finalName = GenMapRedUtils.createMoveTask(context.currentTask, + chDir, fileSink, parseContext, context.moveTask, hconf, context.dependencyTask); + + if (chDir) { + // Merge the files in the destination table/partitions by creating Map-only merge job + // If underlying data is RCFile a RCFileBlockMerge task would be created. + LOG.info("using CombineHiveInputformat for the merge job"); + GenMapRedUtils.createMRWorkForMergingFiles(fileSink, finalName, + context.dependencyTask, context.moveTask, + hconf, context.currentTask); + } + + return true; + } +} \ No newline at end of file Index: ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java (working copy) @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.parse; + +import java.io.Serializable; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.DependencyCollectionTask; +import org.apache.hadoop.hive.ql.exec.MapJoinOperator; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.exec.tez.TezTask; +import org.apache.hadoop.hive.ql.hooks.ReadEntity; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.plan.BaseWork; +import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork; +import org.apache.hadoop.hive.ql.plan.MoveWork; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.ql.plan.TezWork; + +/** + * GenTezProcContext. GenTezProcContext maintains information + * about the tasks and operators as we walk the operator tree + * to break them into TezTasks. + * + */ +public class GenTezProcContext implements NodeProcessorCtx{ + + public final ParseContext parseContext; + public final HiveConf conf; + public final List> moveTask; + + // rootTasks is the entry point for all generated tasks + public final List> rootTasks; + + public final Set inputs; + public final Set outputs; + + // holds the root of the operator tree we're currently processing + // this could be a table scan, but also a join, ptf, etc (i.e.: + // first operator of a reduce task. + public Operator currentRootOperator; + + // this is the original parent of the currentRootOperator as we scan + // through the graph. A root operator might have multiple parents and + // we just use this one to remember where we came from in the current + // walk. + public Operator parentOfRoot; + + // tez task we're currently processing + public TezTask currentTask; + + // last work we've processed (in order to hook it up to the current + // one. + public BaseWork preceedingWork; + + // map that keeps track of the last operator of a task to the work + // that follows it. This is used for connecting them later. + public final Map, BaseWork> leafOperatorToFollowingWork; + + // a map that keeps track of work that need to be linked while + // traversing an operator tree + public final Map, List> linkOpWithWorkMap; + + // a map to keep track of what reduce sinks have to be hooked up to + // map join work + public final Map> linkWorkWithReduceSinkMap; + + // a map that maintains operator (file-sink or reduce-sink) to work mapping + public final Map, BaseWork> operatorWorkMap; + + // a map to keep track of which root generated which work + public final Map, BaseWork> rootToWorkMap; + + // we need to keep the original list of operators in the map join to know + // what position in the mapjoin the different parent work items will have. 
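The link maps declared above are the heart of the deferred wiring between ReduceSinkMapJoinProc and GenTezWork: when the small-table reduce sink of an RS-MJ pattern is walked before the vertex that will hold the map join exists, the parent work and reduce sink are parked in linkOpWithWorkMap / linkWorkWithReduceSinkMap, and the broadcast edge is drawn later once GenTezWork creates the downstream vertex. Below is a minimal, self-contained sketch of that bookkeeping (editorial illustration, not part of this patch); strings stand in for operators and BaseWork vertices, and names such as PendingBroadcastEdges, remember and resolve are hypothetical.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Editorial sketch only: strings stand in for operators and BaseWork vertices.
    class PendingBroadcastEdges {
      // child operator -> small-table vertices that must broadcast into whatever
      // vertex that operator eventually ends up in (cf. linkOpWithWorkMap).
      private final Map<String, List<String>> pending = new HashMap<String, List<String>>();
      // resolved "parent -> child" edges (cf. TezWork.connect with BROADCAST_EDGE).
      private final List<String> broadcastEdges = new ArrayList<String>();

      // Small-table side walked first (ReduceSinkMapJoinProc): its vertex is known,
      // but the vertex for the big-table side does not exist yet.
      void remember(String childOperator, String smallTableVertex) {
        List<String> works = pending.get(childOperator);
        if (works == null) {
          works = new ArrayList<String>();
          pending.put(childOperator, works);
        }
        works.add(smallTableVertex);
      }

      // The vertex containing childOperator has just been created (GenTezWork),
      // so the parked parents can finally be connected to it.
      void resolve(String childOperator, String newVertex) {
        List<String> works = pending.remove(childOperator);
        if (works == null) {
          return;
        }
        for (String parentVertex : works) {
          broadcastEdges.add(parentVertex + " -> " + newVertex);
        }
      }

      List<String> broadcastEdges() {
        return broadcastEdges;
      }
    }

For example, remember("RS_7", "Map 2") followed later by resolve("RS_7", "Map 1") records the single edge "Map 2 -> Map 1", which is what tezWork.connect(parentWork, work, EdgeType.BROADCAST_EDGE) does in the real processors.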
+ public final Map>> mapJoinParentMap; + + // remember the dummy ops we created + public final Map, List>> linkChildOpWithDummyOp; + + // used to group dependent tasks for multi table inserts + public final DependencyCollectionTask dependencyTask; + + @SuppressWarnings("unchecked") + public GenTezProcContext(HiveConf conf, ParseContext parseContext, + List> moveTask, List> rootTasks, + Set inputs, Set outputs) { + + this.conf = conf; + this.parseContext = parseContext; + this.moveTask = moveTask; + this.rootTasks = rootTasks; + this.inputs = inputs; + this.outputs = outputs; + this.currentTask = (TezTask) TaskFactory.get(new TezWork(), conf); + this.leafOperatorToFollowingWork = new HashMap, BaseWork>(); + this.linkOpWithWorkMap = new HashMap, List>(); + this.linkWorkWithReduceSinkMap = new HashMap>(); + this.operatorWorkMap = new HashMap, BaseWork>(); + this.rootToWorkMap = new HashMap, BaseWork>(); + this.mapJoinParentMap = new HashMap>>(); + this.linkChildOpWithDummyOp = new HashMap, List>>(); + this.dependencyTask = (DependencyCollectionTask) + TaskFactory.get(new DependencyCollectionWork(), conf); + + rootTasks.add(currentTask); + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java (working copy) @@ -0,0 +1,259 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.parse; + +import java.util.ArrayList; +import java.util.List; +import java.util.Stack; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.HashTableDummyOperator; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; +import org.apache.hadoop.hive.ql.exec.TableScanOperator; +import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.lib.NodeProcessor; +import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils; +import org.apache.hadoop.hive.ql.plan.BaseWork; +import org.apache.hadoop.hive.ql.plan.MapWork; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.ql.plan.ReduceWork; +import org.apache.hadoop.hive.ql.plan.TezWork; +import org.apache.hadoop.hive.ql.plan.TezWork.EdgeType; + +/** + * GenTezWork separates the operator tree into tez tasks. + * It is called once per leaf operator (operator that forces + * a new execution unit.) and break the operators into work + * and tasks along the way. 
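In other words, every reduce sink (or file sink) closes the vertex currently being packed and, unless the root was seen before (rootToWorkMap), opens a new one connected through a SIMPLE_EDGE. The toy below illustrates only that cutting rule on a linear pipeline of operator names (editorial sketch, not part of this patch): it ignores joins, unions and the broadcast case and uses strings instead of Operator/BaseWork objects.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    // Editorial sketch: cut a linear operator pipeline into vertices at each reduce sink.
    class ToyVertexBuilder {
      final Map<String, List<String>> vertexToOperators = new LinkedHashMap<String, List<String>>();
      final List<String> simpleEdges = new ArrayList<String>();
      private int sequenceNumber = 0;

      void build(List<String> pipeline) {
        String current = "Map " + (++sequenceNumber);
        vertexToOperators.put(current, new ArrayList<String>());
        for (String op : pipeline) {
          vertexToOperators.get(current).add(op);
          if (op.startsWith("RS")) {
            // a reduce sink ends the current vertex; the next operator becomes the root
            // of a new reducer vertex, fed through a simple (shuffle) edge
            String next = "Reducer " + (++sequenceNumber);
            vertexToOperators.put(next, new ArrayList<String>());
            simpleEdges.add(current + " -> " + next);
            current = next;
          }
        }
      }

      public static void main(String[] args) {
        ToyVertexBuilder b = new ToyVertexBuilder();
        b.build(Arrays.asList("TS_0", "FIL_1", "RS_2", "GBY_3", "RS_4", "FS_5"));
        // prints {Map 1=[TS_0, FIL_1, RS_2], Reducer 2=[GBY_3, RS_4], Reducer 3=[FS_5]}
        // and    [Map 1 -> Reducer 2, Reducer 2 -> Reducer 3]
        System.out.println(b.vertexToOperators);
        System.out.println(b.simpleEdges);
      }
    }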
+ */ +public class GenTezWork implements NodeProcessor { + + static final private Log LOG = LogFactory.getLog(GenTezWork.class.getName()); + + // sequence number is used to name vertices (e.g.: Map 1, Reduce 14, ...) + private int sequenceNumber = 0; + + @Override + public Object process(Node nd, Stack stack, + NodeProcessorCtx procContext, Object... nodeOutputs) + throws SemanticException { + + GenTezProcContext context = (GenTezProcContext) procContext; + + assert context != null && context.currentTask != null + && context.currentRootOperator != null; + + // Operator is a file sink or reduce sink. Something that forces + // a new vertex. + Operator operator = (Operator) nd; + + // root is the start of the operator pipeline we're currently + // packing into a vertex, typically a table scan, union or join + Operator root = context.currentRootOperator; + + LOG.debug("Root operator: " + root); + LOG.debug("Leaf operator: " + operator); + + TezWork tezWork = context.currentTask.getWork(); + + // Right now the work graph is pretty simple. If there is no + // Preceding work we have a root and will generate a map + // vertex. If there is a preceding work we will generate + // a reduce vertex + BaseWork work; + if (context.rootToWorkMap.containsKey(root)) { + // having seen the root operator before means there was a branch in the + // operator graph. There's typically two reasons for that: a) mux/demux + // b) multi insert. Mux/Demux will hit the same leaf again, multi insert + // will result into a vertex with multiple FS or RS operators. + + // At this point we don't have to do anything special in this case. Just + // run through the regular paces w/o creating a new task. + work = context.rootToWorkMap.get(root); + } else { + // create a new vertex + if (context.preceedingWork == null) { + work = createMapWork(context, root, tezWork); + } else { + work = createReduceWork(context, root, tezWork); + } + context.rootToWorkMap.put(root, work); + } + + // We're scanning a tree from roots to leaf (this is not technically + // correct, demux and mux operators might form a diamond shape, but + // we will only scan one path and ignore the others, because the + // diamond shape is always contained in a single vertex). The scan + // is depth first and because we remove parents when we pack a pipeline + // into a vertex we will never visit any node twice. But because of that + // we might have a situation where we need to connect 'work' that comes after + // the 'work' we're currently looking at. + // + // Also note: the concept of leaf and root is reversed in hive for historical + // reasons. Roots are data sources, leaves are data sinks. I know. + if (context.leafOperatorToFollowingWork.containsKey(operator)) { + + BaseWork followingWork = context.leafOperatorToFollowingWork.get(operator); + + // need to add this branch to the key + value info + assert operator instanceof ReduceSinkOperator + && followingWork instanceof ReduceWork; + ReduceSinkOperator rs = (ReduceSinkOperator) operator; + ReduceWork rWork = (ReduceWork) followingWork; + GenMapRedUtils.setKeyAndValueDesc(rWork, rs); + + // remember which parent belongs to which tag + rWork.getTagToInput().put(rs.getConf().getTag(), work.getName()); + + // remember the output name of the reduce sink + rs.getConf().setOutputName(rWork.getName()); + + // add dependency between the two work items + tezWork.connect(work, rWork, EdgeType.SIMPLE_EDGE); + } + + // This is where we cut the tree as described above. 
We also remember that + // we might have to connect parent work with this work later. + for (Operator parent: new ArrayList>(root.getParentOperators())) { + context.leafOperatorToFollowingWork.put(parent, work); + LOG.debug("Removing " + parent + " as parent from " + root); + root.removeParent(parent); + } + + // No children means we're at the bottom. If there are more operators to scan + // the next item will be a new root. + if (!operator.getChildOperators().isEmpty()) { + assert operator.getChildOperators().size() == 1; + context.parentOfRoot = operator; + context.currentRootOperator = operator.getChildOperators().get(0); + context.preceedingWork = work; + } + + /* + * this happens in case of map join operations. + * The tree looks like this: + * + * RS <--- we are here perhaps + * | + * MapJoin + * / \ + * RS TS + * / + * TS + * + * If we are at the RS pointed above, and we may have already visited the + * RS following the TS, we have already generated work for the TS-RS. + * We need to hook the current work to this generated work. + */ + context.operatorWorkMap.put(operator, work); + List linkWorkList = context.linkOpWithWorkMap.get(operator); + if (linkWorkList != null) { + if (context.linkChildOpWithDummyOp.containsKey(operator)) { + for (Operator dummy: context.linkChildOpWithDummyOp.get(operator)) { + work.addDummyOp((HashTableDummyOperator) dummy); + } + } + for (BaseWork parentWork : linkWorkList) { + tezWork.connect(parentWork, work, EdgeType.BROADCAST_EDGE); + + // need to set up output name for reduce sink not that we know the name + // of the downstream work + for (ReduceSinkOperator r: + context.linkWorkWithReduceSinkMap.get(parentWork)) { + r.getConf().setOutputName(work.getName()); + } + } + } + + return null; + } + + protected ReduceWork createReduceWork(GenTezProcContext context, Operator root, + TezWork tezWork) { + assert !root.getParentOperators().isEmpty(); + ReduceWork reduceWork = new ReduceWork("Reducer "+ (++sequenceNumber)); + LOG.debug("Adding reduce work (" + reduceWork.getName() + ") for " + root); + reduceWork.setReducer(root); + reduceWork.setNeedsTagging(GenMapRedUtils.needsTagging(reduceWork)); + + // All parents should be reduce sinks. We pick the one we just walked + // to choose the number of reducers. In the join/union case they will + // all be -1. In sort/order case where it matters there will be only + // one parent. 
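The number the reducer vertex inherits here was placed on the ReduceSinkDesc earlier by SetReducerParallelism: a user-fixed value (HADOOPNUMREDUCERS, i.e. mapred.reduce.tasks) wins, otherwise the summed statistics of the sibling operators are divided by hive.exec.reducers.bytes.per.reducer and clamped to hive.exec.reducers.max. A rough, self-contained sketch of that sizing rule follows; the exact arithmetic lives in Utilities.estimateReducers, so treat this as an approximation only.

    // Editorial sketch of the sizing rule, not the actual Utilities.estimateReducers code.
    final class ReducerEstimate {
      private ReducerEstimate() {
      }

      static int estimate(long totalBytes, long bytesPerReducer, int maxReducers, int userFixed) {
        if (userFixed > 0) {
          return userFixed;                      // mapred.reduce.tasks set explicitly
        }
        if (totalBytes <= 0) {
          return 1;                              // no usable stats: still run one reducer
        }
        long needed = (totalBytes + bytesPerReducer - 1) / bytesPerReducer; // ceiling division
        return (int) Math.min((long) maxReducers, Math.max(1L, needed));
      }
    }
    // e.g. estimate(10000000000L, 1000000000L, 999, -1) == 10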
+ assert context.parentOfRoot instanceof ReduceSinkOperator; + ReduceSinkOperator reduceSink = (ReduceSinkOperator) context.parentOfRoot; + + reduceWork.setNumReduceTasks(reduceSink.getConf().getNumReducers()); + + setupReduceSink(context, reduceWork, reduceSink); + + tezWork.add(reduceWork); + tezWork.connect( + context.preceedingWork, + reduceWork, EdgeType.SIMPLE_EDGE); + + return reduceWork; + } + + protected void setupReduceSink(GenTezProcContext context, ReduceWork reduceWork, + ReduceSinkOperator reduceSink) { + + LOG.debug("Setting up reduce sink: " + reduceSink + + " with following reduce work: " + reduceWork.getName()); + + // need to fill in information about the key and value in the reducer + GenMapRedUtils.setKeyAndValueDesc(reduceWork, reduceSink); + + // remember which parent belongs to which tag + reduceWork.getTagToInput().put(reduceSink.getConf().getTag(), + context.preceedingWork.getName()); + + // remember the output name of the reduce sink + reduceSink.getConf().setOutputName(reduceWork.getName()); + } + + protected MapWork createMapWork(GenTezProcContext context, Operator root, + TezWork tezWork) throws SemanticException { + assert root.getParentOperators().isEmpty(); + MapWork mapWork = new MapWork("Map "+ (++sequenceNumber)); + LOG.debug("Adding map work (" + mapWork.getName() + ") for " + root); + + // map work starts with table scan operators + assert root instanceof TableScanOperator; + String alias = ((TableScanOperator)root).getConf().getAlias(); + + setupMapWork(mapWork, context, root, alias); + + // add new item to the tez work + tezWork.add(mapWork); + + return mapWork; + } + + // this method's main use is to help unit testing this class + protected void setupMapWork(MapWork mapWork, GenTezProcContext context, + Operator root, String alias) throws SemanticException { + // All the setup is done in GenMapRedUtils + GenMapRedUtils.setMapWork(mapWork, context.parseContext, + context.inputs, null, root, alias, context.conf, false); + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWorkWalker.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWorkWalker.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWorkWalker.java (working copy) @@ -0,0 +1,110 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.parse; + +import java.util.Collection; +import java.util.HashMap; +import java.util.List; + +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker; +import org.apache.hadoop.hive.ql.lib.Dispatcher; +import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.plan.BaseWork; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; + +/** + * Walks the operator tree in DFS fashion. + */ +public class GenTezWorkWalker extends DefaultGraphWalker { + + private final GenTezProcContext ctx; + + /** + * constructor of the walker - the dispatcher is passed. + * + * @param disp the dispatcher to be called for each node visited + * @param ctx the context where we'll set the current root operator + * + */ + public GenTezWorkWalker(Dispatcher disp, GenTezProcContext ctx) { + super(disp); + this.ctx = ctx; + } + + private void setRoot(Node nd) { + ctx.currentRootOperator = (Operator) nd; + ctx.preceedingWork = null; + ctx.parentOfRoot = null; + } + + /** + * starting point for walking. + * + * @throws SemanticException + */ + @Override + public void startWalking(Collection startNodes, + HashMap nodeOutput) throws SemanticException { + toWalk.addAll(startNodes); + while (toWalk.size() > 0) { + Node nd = toWalk.remove(0); + setRoot(nd); + walk(nd); + if (nodeOutput != null) { + nodeOutput.put(nd, retMap.get(nd)); + } + } + } + + /** + * Walk the given operator. + * + * @param nd operator being walked + */ + @Override + public void walk(Node nd) throws SemanticException { + List children = nd.getChildren(); + + // maintain the stack of operators encountered + opStack.push(nd); + Boolean skip = dispatchAndReturn(nd, opStack); + + // save some positional state + Operator currentRoot = ctx.currentRootOperator; + Operator parentOfRoot = ctx.parentOfRoot; + BaseWork preceedingWork = ctx.preceedingWork; + + if (skip == null || !skip) { + // move all the children to the front of queue + for (Node ch : children) { + + // and restore the state before walking each child + ctx.currentRootOperator = currentRoot; + ctx.parentOfRoot = parentOfRoot; + ctx.preceedingWork = preceedingWork; + + walk(ch); + } + } + + // done with this operator + opStack.pop(); + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java (working copy) @@ -22,10 +22,10 @@ import java.io.Serializable; import java.util.ArrayList; import java.util.HashMap; -import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -33,21 +33,14 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.ql.Context; -import org.apache.hadoop.hive.ql.ErrorMsg; -import org.apache.hadoop.hive.ql.exec.ColumnStatsTask; import org.apache.hadoop.hive.ql.exec.ConditionalTask; -import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.FileSinkOperator; import org.apache.hadoop.hive.ql.exec.MapJoinOperator; import 
org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; -import org.apache.hadoop.hive.ql.exec.StatsTask; import org.apache.hadoop.hive.ql.exec.TableScanOperator; import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.UnionOperator; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.exec.mr.ExecDriver; @@ -61,9 +54,6 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; -import org.apache.hadoop.hive.ql.metadata.Hive; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.optimizer.GenMRFileSink1; import org.apache.hadoop.hive.ql.optimizer.GenMROperator; import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext; @@ -73,299 +63,25 @@ import org.apache.hadoop.hive.ql.optimizer.GenMRRedSink3; import org.apache.hadoop.hive.ql.optimizer.GenMRTableScan1; import org.apache.hadoop.hive.ql.optimizer.GenMRUnion1; -import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils; import org.apache.hadoop.hive.ql.optimizer.MapJoinFactory; import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalContext; import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalOptimizer; -import org.apache.hadoop.hive.ql.plan.ColumnStatsDesc; -import org.apache.hadoop.hive.ql.plan.ColumnStatsWork; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; -import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.FetchWork; -import org.apache.hadoop.hive.ql.plan.LoadFileDesc; -import org.apache.hadoop.hive.ql.plan.LoadTableDesc; import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; -import org.apache.hadoop.hive.ql.plan.PlanUtils; -import org.apache.hadoop.hive.ql.plan.TableDesc; -import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; import org.apache.hadoop.hive.shims.ShimLoader; -public class MapReduceCompiler { +public class MapReduceCompiler extends TaskCompiler { protected final Log LOG = LogFactory.getLog(MapReduceCompiler.class); - private Hive db; - protected LogHelper console; - private HiveConf conf; - public MapReduceCompiler() { } - public void init(HiveConf conf, LogHelper console, Hive db) { - this.conf = conf; - this.db = db; - this.console = console; - } - - @SuppressWarnings({"nls", "unchecked"}) - public void compile(final ParseContext pCtx, final List> rootTasks, - final HashSet inputs, final HashSet outputs) throws SemanticException { - - Context ctx = pCtx.getContext(); - GlobalLimitCtx globalLimitCtx = pCtx.getGlobalLimitCtx(); - QB qb = pCtx.getQB(); - List> mvTask = new ArrayList>(); - - List loadTableWork = pCtx.getLoadTableWork(); - List loadFileWork = pCtx.getLoadFileWork(); - - boolean isCStats = qb.isAnalyzeRewrite(); - - if (pCtx.getFetchTask() != null) { - return; - } - - /* - * In case of a select, use a fetch task instead of a move task. - * If the select is from analyze table column rewrite, don't create a fetch task. Instead create - * a column stats task later. 
- */ - if (pCtx.getQB().getIsQuery() && !isCStats) { - if ((!loadTableWork.isEmpty()) || (loadFileWork.size() != 1)) { - throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg()); - } - - LoadFileDesc loadFileDesc = loadFileWork.get(0); - - String cols = loadFileDesc.getColumns(); - String colTypes = loadFileDesc.getColumnTypes(); - - TableDesc resultTab = pCtx.getFetchTabledesc(); - if (resultTab == null) { - String resFileFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYRESULTFILEFORMAT); - resultTab = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, resFileFormat); - } - - FetchWork fetch = new FetchWork(new Path(loadFileDesc.getSourceDir()).toString(), - resultTab, qb.getParseInfo().getOuterQueryLimit()); - fetch.setSource(pCtx.getFetchSource()); - fetch.setSink(pCtx.getFetchSink()); - - pCtx.setFetchTask((FetchTask) TaskFactory.get(fetch, conf)); - - // For the FetchTask, the limit optimization requires we fetch all the rows - // in memory and count how many rows we get. It's not practical if the - // limit factor is too big - int fetchLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVELIMITOPTMAXFETCH); - if (globalLimitCtx.isEnable() && globalLimitCtx.getGlobalLimit() > fetchLimit) { - LOG.info("For FetchTask, LIMIT " + globalLimitCtx.getGlobalLimit() + " > " + fetchLimit - + ". Doesn't qualify limit optimiztion."); - globalLimitCtx.disableOpt(); - } - } else if (!isCStats) { - for (LoadTableDesc ltd : loadTableWork) { - Task tsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf); - mvTask.add(tsk); - // Check to see if we are stale'ing any indexes and auto-update them if we want - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEINDEXAUTOUPDATE)) { - IndexUpdater indexUpdater = new IndexUpdater(loadTableWork, inputs, conf); - try { - List> indexUpdateTasks = indexUpdater - .generateUpdateTasks(); - for (Task updateTask : indexUpdateTasks) { - tsk.addDependentTask(updateTask); - } - } catch (HiveException e) { - console - .printInfo("WARNING: could not auto-update stale indexes, which are not in sync"); - } - } - } - - boolean oneLoadFile = true; - for (LoadFileDesc lfd : loadFileWork) { - if (qb.isCTAS()) { - assert (oneLoadFile); // should not have more than 1 load file for - // CTAS - // make the movetask's destination directory the table's destination. 
- String location = qb.getTableDesc().getLocation(); - if (location == null) { - // get the table's default location - Table dumpTable; - Path targetPath; - try { - dumpTable = db.newTable(qb.getTableDesc().getTableName()); - if (!db.databaseExists(dumpTable.getDbName())) { - throw new SemanticException("ERROR: The database " + dumpTable.getDbName() - + " does not exist."); - } - Warehouse wh = new Warehouse(conf); - targetPath = wh.getTablePath(db.getDatabase(dumpTable.getDbName()), dumpTable - .getTableName()); - } catch (HiveException e) { - throw new SemanticException(e); - } catch (MetaException e) { - throw new SemanticException(e); - } - - location = targetPath.toString(); - } - lfd.setTargetDir(location); - - oneLoadFile = false; - } - mvTask.add(TaskFactory.get(new MoveWork(null, null, null, lfd, false), conf)); - } - } - - // generate map reduce plans - ParseContext tempParseContext = getParseContext(pCtx, rootTasks); - GenMRProcContext procCtx = new GenMRProcContext( - conf, - new HashMap, Task>(), - tempParseContext, mvTask, rootTasks, - new LinkedHashMap, GenMapRedCtx>(), - inputs, outputs); - - // create a walker which walks the tree in a DFS manner while maintaining - // the operator stack. - // The dispatcher generates the plan from the operator tree - Map opRules = new LinkedHashMap(); - opRules.put(new RuleRegExp(new String("R1"), - TableScanOperator.getOperatorName() + "%"), - new GenMRTableScan1()); - opRules.put(new RuleRegExp(new String("R2"), - TableScanOperator.getOperatorName() + "%.*" + ReduceSinkOperator.getOperatorName() + "%"), - new GenMRRedSink1()); - opRules.put(new RuleRegExp(new String("R3"), - ReduceSinkOperator.getOperatorName() + "%.*" + ReduceSinkOperator.getOperatorName() + "%"), - new GenMRRedSink2()); - opRules.put(new RuleRegExp(new String("R4"), - FileSinkOperator.getOperatorName() + "%"), - new GenMRFileSink1()); - opRules.put(new RuleRegExp(new String("R5"), - UnionOperator.getOperatorName() + "%"), - new GenMRUnion1()); - opRules.put(new RuleRegExp(new String("R6"), - UnionOperator.getOperatorName() + "%.*" + ReduceSinkOperator.getOperatorName() + "%"), - new GenMRRedSink3()); - opRules.put(new RuleRegExp(new String("R7"), - MapJoinOperator.getOperatorName() + "%"), - MapJoinFactory.getTableScanMapJoin()); - - // The dispatcher fires the processor corresponding to the closest matching - // rule and passes the context along - Dispatcher disp = new DefaultRuleDispatcher(new GenMROperator(), opRules, - procCtx); - - GraphWalker ogw = new GenMapRedWalker(disp); - ArrayList topNodes = new ArrayList(); - topNodes.addAll(pCtx.getTopOps().values()); - ogw.startWalking(topNodes, null); - - /* - * If the query was the result of analyze table column compute statistics rewrite, create - * a column stats task instead of a fetch task to persist stats to the metastore. - */ - if (isCStats) { - genColumnStatsTask(qb, loadTableWork, loadFileWork, rootTasks); - } - - // reduce sink does not have any kids - since the plan by now has been - // broken up into multiple - // tasks, iterate over all tasks. 
- // For each task, go over all operators recursively - for (Task rootTask : rootTasks) { - breakTaskTree(rootTask); - } - - // For each task, set the key descriptor for the reducer - for (Task rootTask : rootTasks) { - GenMapRedUtils.setKeyAndValueDescForTaskTree(rootTask); - } - - // If a task contains an operator which instructs bucketizedhiveinputformat - // to be used, please do so - for (Task rootTask : rootTasks) { - setInputFormat(rootTask); - } - - PhysicalContext physicalContext = new PhysicalContext(conf, - getParseContext(pCtx, rootTasks), ctx, rootTasks, pCtx.getFetchTask()); - PhysicalOptimizer physicalOptimizer = new PhysicalOptimizer( - physicalContext, conf); - physicalOptimizer.optimize(); - - decideExecMode(rootTasks, ctx, globalLimitCtx); - - if (qb.isCTAS()) { - // generate a DDL task and make it a dependent task of the leaf - CreateTableDesc crtTblDesc = qb.getTableDesc(); - - crtTblDesc.validate(); - - // Clear the output for CTAS since we don't need the output from the - // mapredWork, the - // DDLWork at the tail of the chain will have the output - outputs.clear(); - - Task crtTblTask = TaskFactory.get(new DDLWork( - inputs, outputs, crtTblDesc), conf); - - // find all leaf tasks and make the DDLTask as a dependent task of all of - // them - HashSet> leaves = new HashSet>(); - getLeafTasks(rootTasks, leaves); - assert (leaves.size() > 0); - for (Task task : leaves) { - if (task instanceof StatsTask) { - // StatsTask require table to already exist - for (Task parentOfStatsTask : task.getParentTasks()) { - parentOfStatsTask.addDependentTask(crtTblTask); - } - for (Task parentOfCrtTblTask : crtTblTask.getParentTasks()) { - parentOfCrtTblTask.removeDependentTask(task); - } - crtTblTask.addDependentTask(task); - } else { - task.addDependentTask(crtTblTask); - } - } - } - - if (globalLimitCtx.isEnable() && pCtx.getFetchTask() != null) { - LOG.info("set least row check for FetchTask: " + globalLimitCtx.getGlobalLimit()); - pCtx.getFetchTask().getWork().setLeastNumRows(globalLimitCtx.getGlobalLimit()); - } - - if (globalLimitCtx.isEnable() && globalLimitCtx.getLastReduceLimitDesc() != null) { - LOG.info("set least row check for LimitDesc: " + globalLimitCtx.getGlobalLimit()); - globalLimitCtx.getLastReduceLimitDesc().setLeastRows(globalLimitCtx.getGlobalLimit()); - List mrTasks = Utilities.getMRTasks(rootTasks); - for (ExecDriver tsk : mrTasks) { - tsk.setRetryCmdWhenFail(true); - } - } - } - - private void setInputFormat(MapWork work, Operator op) { - if (op.isUseBucketizedHiveInputFormat()) { - work.setUseBucketizedHiveInputFormat(true); - return; - } - - if (op.getChildOperators() != null) { - for (Operator childOp : op.getChildOperators()) { - setInputFormat(work, childOp); - } - } - } - // loop over all the tasks recursively - private void setInputFormat(Task task) { + @Override + protected void setInputFormat(Task task) { if (task instanceof ExecDriver) { MapWork work = ((MapredWork) task.getWork()).getMapWork(); HashMap> opMap = work.getAliasToWork(); @@ -389,6 +105,20 @@ } } + private void setInputFormat(MapWork work, Operator op) { + if (op.isUseBucketizedHiveInputFormat()) { + work.setUseBucketizedHiveInputFormat(true); + return; + } + + if (op.getChildOperators() != null) { + for (Operator childOp : op.getChildOperators()) { + setInputFormat(work, childOp); + } + } + } + + @Override public ParseContext getParseContext(ParseContext pCtx, List> rootTasks) { return new ParseContext(conf, pCtx.getQB(), pCtx.getParseTree(), pCtx.getOpToPartPruner(), 
pCtx.getOpToPartList(), pCtx.getTopOps(), @@ -449,67 +179,6 @@ } /** - * A helper function to generate a column stats task on top of map-red task. The column stats - * task fetches from the output of the map-red task, constructs the column stats object and - * persists it to the metastore. - * - * This method generates a plan with a column stats task on top of map-red task and sets up the - * appropriate metadata to be used during execution. - * - * @param qb - */ - @SuppressWarnings("unchecked") - private void genColumnStatsTask(QB qb, List loadTableWork, - List loadFileWork, List> rootTasks) { - QBParseInfo qbParseInfo = qb.getParseInfo(); - ColumnStatsTask cStatsTask = null; - ColumnStatsWork cStatsWork = null; - FetchWork fetch = null; - String tableName = qbParseInfo.getTableName(); - String partName = qbParseInfo.getPartName(); - List colName = qbParseInfo.getColName(); - List colType = qbParseInfo.getColType(); - boolean isTblLevel = qbParseInfo.isTblLvl(); - - String cols = loadFileWork.get(0).getColumns(); - String colTypes = loadFileWork.get(0).getColumnTypes(); - - String resFileFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYRESULTFILEFORMAT); - TableDesc resultTab = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, resFileFormat); - - fetch = new FetchWork(new Path(loadFileWork.get(0).getSourceDir()).toString(), - resultTab, qb.getParseInfo().getOuterQueryLimit()); - - ColumnStatsDesc cStatsDesc = new ColumnStatsDesc(tableName, partName, - colName, colType, isTblLevel); - cStatsWork = new ColumnStatsWork(fetch, cStatsDesc); - cStatsTask = (ColumnStatsTask) TaskFactory.get(cStatsWork, conf); - rootTasks.add(cStatsTask); - } - - /** - * Find all leaf tasks of the list of root tasks. - */ - private void getLeafTasks(List> rootTasks, - HashSet> leaves) { - - for (Task root : rootTasks) { - getLeafTasks(root, leaves); - } - } - - private void getLeafTasks(Task task, - HashSet> leaves) { - if (task.getDependentTasks() == null) { - if (!leaves.contains(task)) { - leaves.add(task); - } - } else { - getLeafTasks(task.getDependentTasks(), leaves); - } - } - - /** * Make a best guess at trying to find the number of reducers */ private static int getNumberOfReducers(MapredWork mrwork, HiveConf conf) { @@ -524,7 +193,8 @@ return conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS); } - private void decideExecMode(List> rootTasks, Context ctx, + @Override + protected void decideExecMode(List> rootTasks, Context ctx, GlobalLimitCtx globalLimitCtx) throws SemanticException { @@ -600,4 +270,74 @@ console.printInfo("Automatically selecting local only mode for query"); } } + + @Override + protected void optimizeTaskPlan(List> rootTasks, + ParseContext pCtx, Context ctx) throws SemanticException { + // reduce sink does not have any kids - since the plan by now has been + // broken up into multiple + // tasks, iterate over all tasks. 
+ // For each task, go over all operators recursively + for (Task rootTask : rootTasks) { + breakTaskTree(rootTask); + } + + + PhysicalContext physicalContext = new PhysicalContext(conf, + getParseContext(pCtx, rootTasks), ctx, rootTasks, pCtx.getFetchTask()); + PhysicalOptimizer physicalOptimizer = new PhysicalOptimizer( + physicalContext, conf); + physicalOptimizer.optimize(); + + } + + @Override + protected void generateTaskTree(List> rootTasks, ParseContext pCtx, + List> mvTask, Set inputs, Set outputs) throws SemanticException { + + // generate map reduce plans + ParseContext tempParseContext = getParseContext(pCtx, rootTasks); + GenMRProcContext procCtx = new GenMRProcContext( + conf, + new HashMap, Task>(), + tempParseContext, mvTask, rootTasks, + new LinkedHashMap, GenMapRedCtx>(), + inputs, outputs); + + // create a walker which walks the tree in a DFS manner while maintaining + // the operator stack. + // The dispatcher generates the plan from the operator tree + Map opRules = new LinkedHashMap(); + opRules.put(new RuleRegExp(new String("R1"), + TableScanOperator.getOperatorName() + "%"), + new GenMRTableScan1()); + opRules.put(new RuleRegExp(new String("R2"), + TableScanOperator.getOperatorName() + "%.*" + ReduceSinkOperator.getOperatorName() + "%"), + new GenMRRedSink1()); + opRules.put(new RuleRegExp(new String("R3"), + ReduceSinkOperator.getOperatorName() + "%.*" + ReduceSinkOperator.getOperatorName() + "%"), + new GenMRRedSink2()); + opRules.put(new RuleRegExp(new String("R4"), + FileSinkOperator.getOperatorName() + "%"), + new GenMRFileSink1()); + opRules.put(new RuleRegExp(new String("R5"), + UnionOperator.getOperatorName() + "%"), + new GenMRUnion1()); + opRules.put(new RuleRegExp(new String("R6"), + UnionOperator.getOperatorName() + "%.*" + ReduceSinkOperator.getOperatorName() + "%"), + new GenMRRedSink3()); + opRules.put(new RuleRegExp(new String("R7"), + MapJoinOperator.getOperatorName() + "%"), + MapJoinFactory.getTableScanMapJoin()); + + // The dispatcher fires the processor corresponding to the closest matching + // rule and passes the context along + Dispatcher disp = new DefaultRuleDispatcher(new GenMROperator(), opRules, + procCtx); + + GraphWalker ogw = new GenMapRedWalker(disp); + ArrayList topNodes = new ArrayList(); + topNodes.addAll(pCtx.getTopOps().values()); + ogw.startWalking(topNodes, null); + } } Index: ql/src/java/org/apache/hadoop/hive/ql/parse/OptimizeTezProcContext.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/OptimizeTezProcContext.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/OptimizeTezProcContext.java (working copy) @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.parse; + +import java.util.Deque; +import java.util.HashSet; +import java.util.Set; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; +import org.apache.hadoop.hive.ql.hooks.ReadEntity; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; + +/** + * OptimizeTezProcContext. OptimizeTezProcContext maintains information + * about the current operator plan as we walk the operator tree + * to do some additional optimizations on it. + * + */ +public class OptimizeTezProcContext implements NodeProcessorCtx{ + + public final ParseContext parseContext; + public final HiveConf conf; + + public final Set inputs; + public final Set outputs; + + public final Set visitedReduceSinks + = new HashSet(); + + // rootOperators are all the table scan operators in sequence + // of traversal + public final Deque> rootOperators; + + @SuppressWarnings("unchecked") + public OptimizeTezProcContext(HiveConf conf, ParseContext parseContext, + Set inputs, Set outputs, + Deque> rootOperators) { + + this.conf = conf; + this.parseContext = parseContext; + this.inputs = inputs; + this.outputs = outputs; + this.rootOperators = rootOperators; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (working copy) @@ -6648,7 +6648,8 @@ ASTNode hint = (ASTNode) hints.getChild(pos); if (((ASTNode) hint.getChild(0)).getToken().getType() == HiveParser.TOK_MAPJOIN) { // the user has specified to ignore mapjoin hint - if (!conf.getBoolVar(HiveConf.ConfVars.HIVEIGNOREMAPJOINHINT)) { + if (!conf.getBoolVar(HiveConf.ConfVars.HIVEIGNOREMAPJOINHINT) + && !conf.getBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_TEZ)) { ASTNode hintTblNames = (ASTNode) hint.getChild(1); int numCh = hintTblNames.getChildCount(); for (int tblPos = 0; tblPos < numCh; tblPos++) { @@ -8355,6 +8356,7 @@ // Add a mapping from the table scan operator to Table topToTable.put((TableScanOperator) top, tab); + Map props = qb.getTabPropsForAlias(alias); if (props != null) { topToTableProps.put((TableScanOperator) top, props); @@ -8961,7 +8963,7 @@ if (!ctx.getExplainLogical()) { // At this point we have the complete operator tree // from which we want to create the map-reduce plan - MapReduceCompiler compiler = new MapReduceCompiler(); + TaskCompiler compiler = TaskCompilerFactory.getCompiler(conf, pCtx); compiler.init(conf, console, db); compiler.compile(pCtx, rootTasks, inputs, outputs); fetchTask = pCtx.getFetchTask(); Index: ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java (working copy) @@ -239,7 +239,7 @@ // and filters. 
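The TableAccessAnalyzer hunk below makes the same defensive change as the SkewJoinOptimizer, CorrelationOptimizer and SortMergeJoinTaskDispatcher hunks above: a parent or child list may now be non-null but empty (GenTezWork removes parents as it cuts the tree, and getBigTableCandidates now returns an empty set rather than null), so tree walkers must treat both cases as "nothing there". The patch inlines the check everywhere; a tiny helper capturing it would look like the sketch below (the helper itself is hypothetical, not part of Hive).

    import java.util.Collection;

    final class CollectionGuards {
      private CollectionGuards() {
      }

      // the guard repeated throughout this patch: null and empty mean the same thing
      static boolean isNullOrEmpty(Collection<?> c) {
        return c == null || c.isEmpty();
      }
    }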
while (true) { parentOps = currOp.getParentOperators(); - if (parentOps == null) { + if ((parentOps == null) || (parentOps.isEmpty())) { return (TableScanOperator) currOp; } Index: ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java (working copy) @@ -0,0 +1,378 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.parse; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.exec.ColumnStatsTask; +import org.apache.hadoop.hive.ql.exec.FetchTask; +import org.apache.hadoop.hive.ql.exec.StatsTask; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.exec.mr.ExecDriver; +import org.apache.hadoop.hive.ql.hooks.ReadEntity; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils; +import org.apache.hadoop.hive.ql.plan.ColumnStatsDesc; +import org.apache.hadoop.hive.ql.plan.ColumnStatsWork; +import org.apache.hadoop.hive.ql.plan.CreateTableDesc; +import org.apache.hadoop.hive.ql.plan.DDLWork; +import org.apache.hadoop.hive.ql.plan.FetchWork; +import org.apache.hadoop.hive.ql.plan.LoadFileDesc; +import org.apache.hadoop.hive.ql.plan.LoadTableDesc; +import org.apache.hadoop.hive.ql.plan.MoveWork; +import org.apache.hadoop.hive.ql.plan.PlanUtils; +import org.apache.hadoop.hive.ql.plan.TableDesc; +import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; + +/** + * TaskCompiler is a the base class for classes that compile + * operator pipelines into tasks. 
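The TableAccessAnalyzer change above widens the termination test of the upward walk: a root operator can apparently carry an empty, non-null parent list, so both cases now count as having reached the TableScan. A simplified sketch of the guarded loop (the real analyzer also restricts which operator types it is willing to walk through):

    private static TableScanOperator walkToRoot(Operator<? extends OperatorDesc> currOp) {
      while (true) {
        List<Operator<? extends OperatorDesc>> parentOps = currOp.getParentOperators();
        if (parentOps == null || parentOps.isEmpty()) {
          return (TableScanOperator) currOp; // top of this branch
        }
        currOp = parentOps.get(0);
      }
    }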
+ */ +public abstract class TaskCompiler { + + protected final Log LOG = LogFactory.getLog(TezCompiler.class); + + protected Hive db; + protected LogHelper console; + protected HiveConf conf; + + public void init(HiveConf conf, LogHelper console, Hive db) { + this.conf = conf; + this.db = db; + this.console = console; + } + + @SuppressWarnings({"nls", "unchecked"}) + public void compile(final ParseContext pCtx, final List> rootTasks, + final HashSet inputs, final HashSet outputs) throws SemanticException { + + Context ctx = pCtx.getContext(); + GlobalLimitCtx globalLimitCtx = pCtx.getGlobalLimitCtx(); + QB qb = pCtx.getQB(); + List> mvTask = new ArrayList>(); + + List loadTableWork = pCtx.getLoadTableWork(); + List loadFileWork = pCtx.getLoadFileWork(); + + boolean isCStats = qb.isAnalyzeRewrite(); + + if (pCtx.getFetchTask() != null) { + return; + } + + optimizeOperatorPlan(pCtx, inputs, outputs); + + /* + * In case of a select, use a fetch task instead of a move task. + * If the select is from analyze table column rewrite, don't create a fetch task. Instead create + * a column stats task later. + */ + if (pCtx.getQB().getIsQuery() && !isCStats) { + if ((!loadTableWork.isEmpty()) || (loadFileWork.size() != 1)) { + throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg()); + } + + LoadFileDesc loadFileDesc = loadFileWork.get(0); + + String cols = loadFileDesc.getColumns(); + String colTypes = loadFileDesc.getColumnTypes(); + + TableDesc resultTab = pCtx.getFetchTabledesc(); + if (resultTab == null) { + String resFileFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYRESULTFILEFORMAT); + resultTab = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, resFileFormat); + } + + FetchWork fetch = new FetchWork(new Path(loadFileDesc.getSourceDir()).toString(), + resultTab, qb.getParseInfo().getOuterQueryLimit()); + fetch.setSource(pCtx.getFetchSource()); + fetch.setSink(pCtx.getFetchSink()); + + pCtx.setFetchTask((FetchTask) TaskFactory.get(fetch, conf)); + + // For the FetchTask, the limit optimization requires we fetch all the rows + // in memory and count how many rows we get. It's not practical if the + // limit factor is too big + int fetchLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVELIMITOPTMAXFETCH); + if (globalLimitCtx.isEnable() && globalLimitCtx.getGlobalLimit() > fetchLimit) { + LOG.info("For FetchTask, LIMIT " + globalLimitCtx.getGlobalLimit() + " > " + fetchLimit + + ". Doesn't qualify limit optimiztion."); + globalLimitCtx.disableOpt(); + } + } else if (!isCStats) { + for (LoadTableDesc ltd : loadTableWork) { + Task tsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf); + mvTask.add(tsk); + // Check to see if we are stale'ing any indexes and auto-update them if we want + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEINDEXAUTOUPDATE)) { + IndexUpdater indexUpdater = new IndexUpdater(loadTableWork, inputs, conf); + try { + List> indexUpdateTasks = indexUpdater + .generateUpdateTasks(); + for (Task updateTask : indexUpdateTasks) { + tsk.addDependentTask(updateTask); + } + } catch (HiveException e) { + console + .printInfo("WARNING: could not auto-update stale indexes, which are not in sync"); + } + } + } + + boolean oneLoadFile = true; + for (LoadFileDesc lfd : loadFileWork) { + if (qb.isCTAS()) { + assert (oneLoadFile); // should not have more than 1 load file for + // CTAS + // make the movetask's destination directory the table's destination. 
+ String location = qb.getTableDesc().getLocation(); + if (location == null) { + // get the table's default location + Table dumpTable; + Path targetPath; + try { + dumpTable = db.newTable(qb.getTableDesc().getTableName()); + if (!db.databaseExists(dumpTable.getDbName())) { + throw new SemanticException("ERROR: The database " + dumpTable.getDbName() + + " does not exist."); + } + Warehouse wh = new Warehouse(conf); + targetPath = wh.getTablePath(db.getDatabase(dumpTable.getDbName()), dumpTable + .getTableName()); + } catch (HiveException e) { + throw new SemanticException(e); + } catch (MetaException e) { + throw new SemanticException(e); + } + + location = targetPath.toString(); + } + lfd.setTargetDir(location); + + oneLoadFile = false; + } + mvTask.add(TaskFactory.get(new MoveWork(null, null, null, lfd, false), conf)); + } + } + + generateTaskTree(rootTasks, pCtx, mvTask, inputs, outputs); + + /* + * If the query was the result of analyze table column compute statistics rewrite, create + * a column stats task instead of a fetch task to persist stats to the metastore. + */ + if (isCStats) { + genColumnStatsTask(qb, loadTableWork, loadFileWork, rootTasks); + } + + // For each task, set the key descriptor for the reducer + for (Task rootTask : rootTasks) { + GenMapRedUtils.setKeyAndValueDescForTaskTree(rootTask); + } + + // If a task contains an operator which instructs bucketizedhiveinputformat + // to be used, please do so + for (Task rootTask : rootTasks) { + setInputFormat(rootTask); + } + + optimizeTaskPlan(rootTasks, pCtx, ctx); + + decideExecMode(rootTasks, ctx, globalLimitCtx); + + if (qb.isCTAS()) { + // generate a DDL task and make it a dependent task of the leaf + CreateTableDesc crtTblDesc = qb.getTableDesc(); + + crtTblDesc.validate(); + + // Clear the output for CTAS since we don't need the output from the + // mapredWork, the + // DDLWork at the tail of the chain will have the output + outputs.clear(); + + Task crtTblTask = TaskFactory.get(new DDLWork( + inputs, outputs, crtTblDesc), conf); + + // find all leaf tasks and make the DDLTask as a dependent task of all of + // them + HashSet> leaves = new LinkedHashSet>(); + getLeafTasks(rootTasks, leaves); + assert (leaves.size() > 0); + for (Task task : leaves) { + if (task instanceof StatsTask) { + // StatsTask require table to already exist + for (Task parentOfStatsTask : task.getParentTasks()) { + parentOfStatsTask.addDependentTask(crtTblTask); + } + for (Task parentOfCrtTblTask : crtTblTask.getParentTasks()) { + parentOfCrtTblTask.removeDependentTask(task); + } + crtTblTask.addDependentTask(task); + } else { + task.addDependentTask(crtTblTask); + } + } + } + + if (globalLimitCtx.isEnable() && pCtx.getFetchTask() != null) { + LOG.info("set least row check for FetchTask: " + globalLimitCtx.getGlobalLimit()); + pCtx.getFetchTask().getWork().setLeastNumRows(globalLimitCtx.getGlobalLimit()); + } + + if (globalLimitCtx.isEnable() && globalLimitCtx.getLastReduceLimitDesc() != null) { + LOG.info("set least row check for LimitDesc: " + globalLimitCtx.getGlobalLimit()); + globalLimitCtx.getLastReduceLimitDesc().setLeastRows(globalLimitCtx.getGlobalLimit()); + List mrTasks = Utilities.getMRTasks(rootTasks); + for (ExecDriver tsk : mrTasks) { + tsk.setRetryCmdWhenFail(true); + } + } + } + + + /** + * A helper function to generate a column stats task on top of map-red task. The column stats + * task fetches from the output of the map-red task, constructs the column stats object and + * persists it to the metastore. 
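compile() above is a template method: fetch/move task setup, column-stats tasks, CTAS wiring and limit handling are shared, while everything engine-specific is pushed into the abstract hooks declared at the end of the class. A hypothetical minimal subclass only has to fill in those hooks; the class name, empty bodies and generic parameters below are illustrative, not taken from this patch.

    package org.apache.hadoop.hive.ql.parse;

    import java.io.Serializable;
    import java.util.List;
    import java.util.Set;

    import org.apache.hadoop.hive.ql.Context;
    import org.apache.hadoop.hive.ql.exec.Task;
    import org.apache.hadoop.hive.ql.hooks.ReadEntity;
    import org.apache.hadoop.hive.ql.hooks.WriteEntity;
    import org.apache.hadoop.hive.ql.plan.MoveWork;

    public class NoopTaskCompiler extends TaskCompiler {

      @Override
      protected void generateTaskTree(List<Task<? extends Serializable>> rootTasks,
          ParseContext pCtx, List<Task<MoveWork>> mvTask,
          Set<ReadEntity> inputs, Set<WriteEntity> outputs) throws SemanticException {
        // turn the operator tree into engine-specific tasks and add them to rootTasks
      }

      @Override
      protected void setInputFormat(Task<? extends Serializable> rootTask) {
        // switch tasks to BucketizedHiveInputFormat where an operator asks for it
      }

      @Override
      protected void optimizeTaskPlan(List<Task<? extends Serializable>> rootTasks,
          ParseContext pCtx, Context ctx) throws SemanticException {
        // physical optimizations over the generated tasks
      }

      @Override
      protected void decideExecMode(List<Task<? extends Serializable>> rootTasks,
          Context ctx, GlobalLimitCtx globalLimitCtx) throws SemanticException {
        // optionally convert tasks to local execution
      }
    }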
+ * + * This method generates a plan with a column stats task on top of map-red task and sets up the + * appropriate metadata to be used during execution. + * + * @param qb + */ + @SuppressWarnings("unchecked") + protected void genColumnStatsTask(QB qb, List loadTableWork, + List loadFileWork, List> rootTasks) { + QBParseInfo qbParseInfo = qb.getParseInfo(); + ColumnStatsTask cStatsTask = null; + ColumnStatsWork cStatsWork = null; + FetchWork fetch = null; + String tableName = qbParseInfo.getTableName(); + String partName = qbParseInfo.getPartName(); + List colName = qbParseInfo.getColName(); + List colType = qbParseInfo.getColType(); + boolean isTblLevel = qbParseInfo.isTblLvl(); + + String cols = loadFileWork.get(0).getColumns(); + String colTypes = loadFileWork.get(0).getColumnTypes(); + + String resFileFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYRESULTFILEFORMAT); + TableDesc resultTab = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, resFileFormat); + + fetch = new FetchWork(new Path(loadFileWork.get(0).getSourceDir()).toString(), + resultTab, qb.getParseInfo().getOuterQueryLimit()); + + ColumnStatsDesc cStatsDesc = new ColumnStatsDesc(tableName, partName, + colName, colType, isTblLevel); + cStatsWork = new ColumnStatsWork(fetch, cStatsDesc); + cStatsTask = (ColumnStatsTask) TaskFactory.get(cStatsWork, conf); + rootTasks.add(cStatsTask); + } + + + /** + * Find all leaf tasks of the list of root tasks. + */ + protected void getLeafTasks(List> rootTasks, + HashSet> leaves) { + + for (Task root : rootTasks) { + getLeafTasks(root, leaves); + } + } + + private void getLeafTasks(Task task, + HashSet> leaves) { + if (task.getDependentTasks() == null) { + if (!leaves.contains(task)) { + leaves.add(task); + } + } else { + getLeafTasks(task.getDependentTasks(), leaves); + } + } + + /* + * Called to transform tasks into local tasks where possible/desirable + */ + protected abstract void decideExecMode(List> rootTasks, Context ctx, + GlobalLimitCtx globalLimitCtx) throws SemanticException; + + /* + * Called at the beginning of the compile phase to have another chance to optimize the operator plan + */ + protected void optimizeOperatorPlan(ParseContext pCtxSet, Set inputs, + Set outputs) throws SemanticException { + } + + /* + * Called after the tasks have been generated to run another round of optimization + */ + protected abstract void optimizeTaskPlan(List> rootTasks, + ParseContext pCtx, Context ctx) throws SemanticException; + + /* + * Called to set the appropriate input format for tasks + */ + protected abstract void setInputFormat(Task rootTask); + + /* + * Called to generate the taks tree from the parse context/operator tree + */ + protected abstract void generateTaskTree(List> rootTasks, ParseContext pCtx, + List> mvTask, Set inputs, Set outputs) throws SemanticException; + + /** + * Create a clone of the parse context + */ + public ParseContext getParseContext(ParseContext pCtx, List> rootTasks) { + return new ParseContext(conf, pCtx.getQB(), pCtx.getParseTree(), + pCtx.getOpToPartPruner(), pCtx.getOpToPartList(), pCtx.getTopOps(), + pCtx.getTopSelOps(), pCtx.getOpParseCtx(), pCtx.getJoinContext(), + pCtx.getSmbMapJoinContext(), pCtx.getTopToTable(), pCtx.getTopToProps(), + pCtx.getFsopToTable(), + pCtx.getLoadTableWork(), pCtx.getLoadFileWork(), pCtx.getContext(), + pCtx.getIdToTableNameMap(), pCtx.getDestTableId(), pCtx.getUCtx(), + pCtx.getListMapJoinOpsNoReducer(), pCtx.getGroupOpToInputTables(), + pCtx.getPrunedPartitions(), pCtx.getOpToSamplePruner(), 
pCtx.getGlobalLimitCtx(), + pCtx.getNameToSplitSample(), pCtx.getSemanticInputs(), rootTasks, + pCtx.getOpToPartToSkewedPruner(), pCtx.getViewAliasToInput(), + pCtx.getReduceSinkOperatorsAddedByEnforceBucketingSorting(), + pCtx.getQueryProperties()); + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompilerFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompilerFactory.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompilerFactory.java (working copy) @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.parse; + +import org.apache.hadoop.hive.conf.HiveConf; + +/** + * TaskCompilerFactory is a factory class to choose the appropriate + * TaskCompiler. + */ +public class TaskCompilerFactory { + + private TaskCompilerFactory() { + // avoid instantiation + } + + /** + * Returns the appropriate compiler to translate the operator tree + * into executable units. + */ + public static TaskCompiler getCompiler(HiveConf conf, ParseContext parseContext) { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_OPTIMIZE_TEZ) + && !parseContext.getQB().getParseInfo().isAnalyzeCommand()) { + return new TezCompiler(); + } else { + return new MapReduceCompiler(); + } + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java (working copy) @@ -0,0 +1,215 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.parse; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Deque; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.Stack; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.exec.ConditionalTask; +import org.apache.hadoop.hive.ql.exec.FileSinkOperator; +import org.apache.hadoop.hive.ql.exec.JoinOperator; +import org.apache.hadoop.hive.ql.exec.MapJoinOperator; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.UnionOperator; +import org.apache.hadoop.hive.ql.exec.tez.TezTask; +import org.apache.hadoop.hive.ql.hooks.ReadEntity; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.lib.CompositeProcessor; +import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher; +import org.apache.hadoop.hive.ql.lib.Dispatcher; +import org.apache.hadoop.hive.ql.lib.GraphWalker; +import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.lib.NodeProcessor; +import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.lib.Rule; +import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.optimizer.ConvertJoinMapJoin; +import org.apache.hadoop.hive.ql.optimizer.ReduceSinkMapJoinProc; +import org.apache.hadoop.hive.ql.optimizer.SetReducerParallelism; +import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalContext; +import org.apache.hadoop.hive.ql.optimizer.physical.Vectorizer; +import org.apache.hadoop.hive.ql.plan.BaseWork; +import org.apache.hadoop.hive.ql.plan.MapWork; +import org.apache.hadoop.hive.ql.plan.MoveWork; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.ql.plan.TezWork; + +/** + * TezCompiler translates the operator plan into TezTasks. + */ +public class TezCompiler extends TaskCompiler { + + protected final Log LOG = LogFactory.getLog(TezCompiler.class); + + public TezCompiler() { + } + + @Override + protected void optimizeOperatorPlan(ParseContext pCtx, Set inputs, + Set outputs) throws SemanticException { + + // Sequence of TableScan operators to be walked + Deque> deque = new LinkedList>(); + deque.addAll(pCtx.getTopOps().values()); + + // Create the context for the walker + OptimizeTezProcContext procCtx + = new OptimizeTezProcContext(conf, pCtx, inputs, outputs, deque); + + // create a walker which walks the tree in a DFS manner while maintaining + // the operator stack. 
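The rule tables assembled just below key off Hive's short operator tags; each RuleRegExp is matched, roughly, against the concatenation of the tags of the operators on the walker's current stack. A few illustrative expansions, assuming the usual tag values ("TS" for TableScan, "RS" for ReduceSink, "MAPJOIN" for MapJoin, and so on):

    // "RS%"          fires when the node being visited is a ReduceSinkOperator
    String rsRule = ReduceSinkOperator.getOperatorName() + "%";

    // "TS%.*RS%"     fires on a ReduceSink with a TableScan somewhere above it on the walked path
    String tsToRs = TableScanOperator.getOperatorName() + "%.*"
        + ReduceSinkOperator.getOperatorName() + "%";

    // "RS%MAPJOIN%"  fires on a MapJoin whose immediate predecessor on the path is a ReduceSink
    String rsMapJoin = ReduceSinkOperator.getOperatorName() + "%"
        + MapJoinOperator.getOperatorName() + "%";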
+ Map opRules = new LinkedHashMap(); + opRules.put(new RuleRegExp(new String("Set parallelism - ReduceSink"), + ReduceSinkOperator.getOperatorName() + "%"), + new SetReducerParallelism()); + + opRules.put(new RuleRegExp(new String("Convert Join to Map-join"), + JoinOperator.getOperatorName() + "%"), new ConvertJoinMapJoin()); + + // The dispatcher fires the processor corresponding to the closest matching + // rule and passes the context along + Dispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx); + List topNodes = new ArrayList(); + topNodes.addAll(pCtx.getTopOps().values()); + GraphWalker ogw = new TezWalker(disp); + ogw.startWalking(topNodes, null); + } + + @Override + protected void generateTaskTree(List> rootTasks, ParseContext pCtx, + List> mvTask, Set inputs, Set outputs) + throws SemanticException { + + ParseContext tempParseContext = getParseContext(pCtx, rootTasks); + GenTezWork genTezWork = new GenTezWork(); + + GenTezProcContext procCtx = new GenTezProcContext( + conf, tempParseContext, mvTask, rootTasks, inputs, outputs); + + // create a walker which walks the tree in a DFS manner while maintaining + // the operator stack. + // The dispatcher generates the plan from the operator tree + Map opRules = new LinkedHashMap(); + opRules.put(new RuleRegExp("Split Work - ReduceSink", + ReduceSinkOperator.getOperatorName() + "%"), + genTezWork); + + opRules.put(new RuleRegExp("No more walking on ReduceSink-MapJoin", + ReduceSinkOperator.getOperatorName() + "%" + + MapJoinOperator.getOperatorName() + "%"), new ReduceSinkMapJoinProc()); + + opRules.put(new RuleRegExp("Split Work + Move/Merge - FileSink", + FileSinkOperator.getOperatorName() + "%"), + new CompositeProcessor(new FileSinkProcessor(), genTezWork)); + + opRules.put(new RuleRegExp("Bail on Union", + UnionOperator.getOperatorName() + "%"), new NodeProcessor() + { + @Override + public Object process(Node n, Stack s, + NodeProcessorCtx procCtx, Object... os) throws SemanticException { + throw new SemanticException("Unions not yet supported on Tez." 
+ +" Please use MR for this query"); + } + }); + + // The dispatcher fires the processor corresponding to the closest matching + // rule and passes the context along + Dispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx); + List topNodes = new ArrayList(); + topNodes.addAll(pCtx.getTopOps().values()); + GraphWalker ogw = new GenTezWorkWalker(disp, procCtx); + ogw.startWalking(topNodes, null); + } + + @Override + protected void setInputFormat(Task task) { + if (task instanceof TezTask) { + TezWork work = ((TezTask)task).getWork(); + Set roots = work.getRoots(); + for (BaseWork w: roots) { + assert w instanceof MapWork; + MapWork mapWork = (MapWork)w; + HashMap> opMap = mapWork.getAliasToWork(); + if (!opMap.isEmpty()) { + for (Operator op : opMap.values()) { + setInputFormat(mapWork, op); + } + } + } + } else if (task instanceof ConditionalTask) { + List> listTasks + = ((ConditionalTask) task).getListTasks(); + for (Task tsk : listTasks) { + setInputFormat(tsk); + } + } + + if (task.getChildTasks() != null) { + for (Task childTask : task.getChildTasks()) { + setInputFormat(childTask); + } + } + } + + private void setInputFormat(MapWork work, Operator op) { + if (op.isUseBucketizedHiveInputFormat()) { + work.setUseBucketizedHiveInputFormat(true); + return; + } + + if (op.getChildOperators() != null) { + for (Operator childOp : op.getChildOperators()) { + setInputFormat(work, childOp); + } + } + } + + @Override + protected void decideExecMode(List> rootTasks, Context ctx, + GlobalLimitCtx globalLimitCtx) + throws SemanticException { + // currently all Tez work is on the cluster + return; + } + + @Override + protected void optimizeTaskPlan(List> rootTasks, ParseContext pCtx, + Context ctx) throws SemanticException { + PhysicalContext physicalCtx = new PhysicalContext(conf, pCtx, pCtx.getContext(), rootTasks, + pCtx.getFetchTask()); + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED)) { + (new Vectorizer()).resolve(physicalCtx); + } + return; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/parse/TezWalker.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/TezWalker.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/TezWalker.java (working copy) @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.parse; + +import java.util.List; + +import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker; +import org.apache.hadoop.hive.ql.lib.Dispatcher; +import org.apache.hadoop.hive.ql.lib.Node; + +/** + * Walks the operator tree in DFS fashion. + */ +public class TezWalker extends DefaultGraphWalker { + + /** + * constructor of the walker - the dispatcher is passed. 
+ * + * @param disp + * the dispatcher to be called for each node visited + */ + public TezWalker(Dispatcher disp) { + super(disp); + } + + /** + * Walk the given operator. + * + * @param nd + * operator being walked + */ + @Override + public void walk(Node nd) throws SemanticException { + List children = nd.getChildren(); + + // maintain the stack of operators encountered + opStack.push(nd); + Boolean skip = dispatchAndReturn(nd, opStack); + + if (skip == null || !skip) { + // move all the children to the front of queue + for (Node ch : children) { + walk(ch); + } + } + + // done with this operator + opStack.pop(); + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java (working copy) @@ -19,10 +19,10 @@ package org.apache.hadoop.hive.ql.plan; import java.util.ArrayList; -import java.util.HashMap; +import java.util.LinkedList; import java.util.List; -import java.util.Map; +import org.apache.hadoop.hive.ql.exec.HashTableDummyOperator; import org.apache.hadoop.hive.ql.exec.Operator; /** @@ -32,8 +32,22 @@ @SuppressWarnings({"serial", "deprecation"}) public abstract class BaseWork extends AbstractOperatorDesc { + // dummyOps is a reference to all the HashTableDummy operators in the + // plan. These have to be separately initialized when we setup a task. + // Their funtion is mainly as root ops to give the mapjoin the correct + // schema info. + List dummyOps; + + public BaseWork() {} + + public BaseWork(String name) { + setName(name); + } + private boolean gatheringStats; + private String name; + public void setGatheringStats(boolean gatherStats) { this.gatheringStats = gatherStats; } @@ -42,6 +56,29 @@ return this.gatheringStats; } + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public List getDummyOps() { + return dummyOps; + } + + public void setDummyOps(List dummyOps) { + this.dummyOps = dummyOps; + } + + public void addDummyOp(HashTableDummyOperator dummyOp) { + if (dummyOps == null) { + dummyOps = new LinkedList(); + } + dummyOps.add(dummyOp); + } + protected abstract List> getAllRootOperators(); public List> getAllOperators() { Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java (working copy) @@ -147,6 +147,8 @@ MapWork work; if (mrTask.getWork() instanceof MapredWork) { work = ((MapredWork) mrTask.getWork()).getMapWork(); + } else if (mrTask.getWork() instanceof TezWork){ + work = (MapWork) ((TezWork) mrTask.getWork()).getAllWork().get(0); } else { work = (MapWork) mrTask.getWork(); } Index: ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java (working copy) @@ -187,6 +187,7 @@ this.tagOrder = clone.tagOrder; this.filters = clone.filters; this.filterMap = clone.filterMap; + this.statistics = clone.statistics; } public Map> getExprs() { Index: ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java 
=================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java (working copy) @@ -47,6 +47,9 @@ private transient String bigTableAlias; + // for tez. used to remember which position maps to which logical input + private Map parentToInput = new HashMap(); + // table alias (small) --> input file name (big) --> target file names (small) private Map>> aliasBucketFileNameMapping; private Map bigTableBucketNumMapping; @@ -74,6 +77,7 @@ this.bigTableBucketNumMapping = clone.bigTableBucketNumMapping; this.bigTablePartSpecToFileMapping = clone.bigTablePartSpecToFileMapping; this.dumpFilePrefix = clone.dumpFilePrefix; + this.parentToInput = clone.parentToInput; } public MapJoinDesc(final Map> keys, @@ -106,6 +110,14 @@ } } + public Map getParentToInput() { + return parentToInput; + } + + public void setParentToInput(Map parentToInput) { + this.parentToInput = parentToInput; + } + public Map> getRetainList() { return retainList; } Index: ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java (working copy) @@ -116,8 +116,10 @@ private Map> scratchColumnMap = null; private boolean vectorMode = false; + public MapWork() {} - public MapWork() { + public MapWork(String name) { + super(name); } @Explain(displayName = "Path -> Alias", normalExplain = false) Index: ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java (working copy) @@ -29,7 +29,9 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.exec.ColumnInfo; @@ -386,14 +388,34 @@ /** * Generate the table descriptor for Map-side join key. */ - public static TableDesc getMapJoinKeyTableDesc(List fieldSchemas) { - return new TableDesc(SequenceFileInputFormat.class, - SequenceFileOutputFormat.class, Utilities.makeProperties("columns", - MetaStoreUtils.getColumnNamesFromFieldSchema(fieldSchemas), - "columns.types", MetaStoreUtils - .getColumnTypesFromFieldSchema(fieldSchemas), - serdeConstants.ESCAPE_CHAR, "\\", - serdeConstants.SERIALIZATION_LIB,LazyBinarySerDe.class.getName())); + public static TableDesc getMapJoinKeyTableDesc(Configuration conf, + List fieldSchemas) { + if (HiveConf.getBoolVar(conf, ConfVars.HIVE_OPTIMIZE_TEZ)) { + // In tez we use a different way of transmitting the hash table. + // We basically use ReduceSinkOperators and set the transfer to + // be broadcast (instead of partitioned). As a consequence we use + // a different SerDe than in the MR mapjoin case. 
+ StringBuffer order = new StringBuffer(); + for (FieldSchema f: fieldSchemas) { + order.append("+"); + } + return new TableDesc( + SequenceFileInputFormat.class, SequenceFileOutputFormat.class, + Utilities.makeProperties(serdeConstants.LIST_COLUMNS, MetaStoreUtils + .getColumnNamesFromFieldSchema(fieldSchemas), + serdeConstants.LIST_COLUMN_TYPES, MetaStoreUtils + .getColumnTypesFromFieldSchema(fieldSchemas), + serdeConstants.SERIALIZATION_SORT_ORDER, order.toString(), + serdeConstants.SERIALIZATION_LIB, BinarySortableSerDe.class.getName())); + } else { + return new TableDesc(SequenceFileInputFormat.class, + SequenceFileOutputFormat.class, Utilities.makeProperties("columns", + MetaStoreUtils.getColumnNamesFromFieldSchema(fieldSchemas), + "columns.types", MetaStoreUtils + .getColumnTypesFromFieldSchema(fieldSchemas), + serdeConstants.ESCAPE_CHAR, "\\", + serdeConstants.SERIALIZATION_LIB,LazyBinarySerDe.class.getName())); + } } /** @@ -401,13 +423,14 @@ */ public static TableDesc getMapJoinValueTableDesc( List fieldSchemas) { - return new TableDesc(SequenceFileInputFormat.class, - SequenceFileOutputFormat.class, Utilities.makeProperties("columns", - MetaStoreUtils.getColumnNamesFromFieldSchema(fieldSchemas), - "columns.types", MetaStoreUtils - .getColumnTypesFromFieldSchema(fieldSchemas), - serdeConstants.ESCAPE_CHAR, "\\", - serdeConstants.SERIALIZATION_LIB,LazyBinarySerDe.class.getName())); + return new TableDesc(SequenceFileInputFormat.class, + SequenceFileOutputFormat.class, Utilities.makeProperties( + serdeConstants.LIST_COLUMNS, MetaStoreUtils + .getColumnNamesFromFieldSchema(fieldSchemas), + serdeConstants.LIST_COLUMN_TYPES, MetaStoreUtils + .getColumnTypesFromFieldSchema(fieldSchemas), + serdeConstants.ESCAPE_CHAR, "\\", + serdeConstants.SERIALIZATION_LIB,LazyBinarySerDe.class.getName())); } /** Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java (working copy) @@ -60,6 +60,12 @@ private int numDistributionKeys; /** + * Used in tez. Holds the name of the output + * that this reduce sink is writing to. + */ + private String outputName; + + /** * The partition columns (CLUSTER BY or DISTRIBUTE BY in Hive language). * Partition columns decide the reducer that the current row goes to. * Partition columns are not passed to reducer. 
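getMapJoinKeyTableDesc is now configuration dependent: on Tez the small-table key travels through a broadcast ReduceSink, so it is serialized with BinarySortableSerDe and an all-ascending sort order, while the MapReduce path keeps the LazyBinarySerDe descriptor. A usage sketch; the field schema is a placeholder:

    List<FieldSchema> keySchema = Arrays.asList(new FieldSchema("joinkey0", "int", null));

    // MR path: LazyBinarySerDe-backed key descriptor
    TableDesc mrKeyDesc = PlanUtils.getMapJoinKeyTableDesc(new HiveConf(), keySchema);

    // Tez path: hive.optimize.tez=true switches to BinarySortableSerDe with sort order "+"
    HiveConf tezConf = new HiveConf();
    tezConf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_TEZ, true);
    TableDesc tezKeyDesc = PlanUtils.getMapJoinKeyTableDesc(tezConf, keySchema);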
@@ -273,4 +279,12 @@ List> distinctColumnIndices) { this.distinctColumnIndices = distinctColumnIndices; } + + public String getOutputName() { + return outputName; + } + + public void setOutputName(String outputName) { + this.outputName = outputName; + } } Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java (working copy) @@ -19,7 +19,9 @@ package org.apache.hadoop.hive.ql.plan; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -44,6 +46,12 @@ @SuppressWarnings({"serial", "deprecation"}) public class ReduceWork extends BaseWork { + public ReduceWork() {} + + public ReduceWork(String name) { + super(name); + } + private static transient final Log LOG = LogFactory.getLog(ReduceWork.class); // schema of the map-reduce 'key' object - this is homogeneous @@ -63,6 +71,8 @@ // not (e.g.: group by) private boolean needsTagging; + private Map tagToInput = new HashMap(); + /** * If the plan has a reducer and correspondingly a reduce-sink, then store the TableDesc pointing * to keySerializeInfo of the ReduceSink @@ -103,6 +113,14 @@ this.needsTagging = needsTagging; } + public void setTagToInput(final Map tagToInput) { + this.tagToInput = tagToInput; + } + + public Map getTagToInput() { + return tagToInput; + } + @Override protected List> getAllRootOperators() { ArrayList> opList = new ArrayList>(); Index: ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java (working copy) @@ -0,0 +1,223 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +/** + * TezWork. This class encapsulates all the work objects that can be executed + * in a single tez job. Currently it's basically a tree with MapWork at the + * leaves and and ReduceWork in all other nodes. 
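The outputName added to ReduceSinkDesc and the tagToInput map added to ReduceWork are two halves of the Tez edge bookkeeping: the producing sink records which downstream work it writes to, and the consuming reduce work remembers which logical input each join tag arrives from. A hypothetical wiring sketch; the names and tag value are illustrative, the real values being assigned during Tez work generation:

    reduceSinkDesc.setOutputName("Reducer 2");            // name of the downstream BaseWork / vertex

    Map<Integer, String> tagToInput = new HashMap<Integer, String>();
    tagToInput.put(0, "Map 1");                           // join tag 0 is fed by the "Map 1" vertex
    reduceWork.setTagToInput(tagToInput);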
+ * + */ +@SuppressWarnings("serial") +@Explain(displayName = "Tez") +public class TezWork extends AbstractOperatorDesc { + + public enum EdgeType { + SIMPLE_EDGE, + BROADCAST_EDGE + } + + private static transient final Log LOG = LogFactory.getLog(TezWork.class); + + private final Set roots = new HashSet(); + private final Set leaves = new HashSet(); + private final Map> workGraph = new HashMap>(); + private final Map> invertedWorkGraph = new HashMap>(); + private final Map, EdgeType> edgeProperties = + new HashMap, EdgeType>(); + + /** + * getAllWork returns a topologically sorted list of BaseWork + */ + @Explain(skipHeader = true, displayName = "Tez Work") + public List getAllWork() { + + List result = new LinkedList(); + Set seen = new HashSet(); + + for (BaseWork leaf: leaves) { + // make sure all leaves are visited at least once + visit(leaf, seen, result); + } + + return result; + } + + private void visit(BaseWork child, Set seen, List result) { + + if (seen.contains(child)) { + // don't visit multiple times + return; + } + + seen.add(child); + + for (BaseWork parent: getParents(child)) { + if (!seen.contains(parent)) { + visit(parent, seen, result); + } + } + + result.add(child); + } + + /** + * add all nodes in the collection without any connections + */ + public void addAll(Collection c) { + for (BaseWork w: c) { + this.add(w); + } + } + + /** + * add all nodes in the collection without any connections + */ + public void addAll(BaseWork[] bws) { + for (BaseWork w: bws) { + this.add(w); + } + } + + /** + * add creates a new node in the graph without any connections + */ + public void add(BaseWork w) { + if (workGraph.containsKey(w)) { + return; + } + workGraph.put(w, new LinkedList()); + invertedWorkGraph.put(w, new LinkedList()); + roots.add(w); + leaves.add(w); + } + + /** + * connect adds an edge between a and b. Both nodes have + * to be added prior to calling connect. + */ + public void connect(BaseWork a, BaseWork b, EdgeType edgeType) { + workGraph.get(a).add(b); + invertedWorkGraph.get(b).add(a); + roots.remove(b); + leaves.remove(a); + ImmutablePair workPair = new ImmutablePair(a, b); + edgeProperties.put(workPair, edgeType); + } + + /** + * disconnect removes an edge between a and b. Both a and + * b have to be in the graph. If there is no matching edge + * no change happens. + */ + public void disconnect(BaseWork a, BaseWork b) { + workGraph.get(a).remove(b); + invertedWorkGraph.get(b).remove(a); + if (getParents(b).isEmpty()) { + roots.add(b); + } + if (getChildren(a).isEmpty()) { + leaves.add(a); + } + } + + /** + * getRoots returns all nodes that do not have a parent. + */ + public Set getRoots() { + return new HashSet(roots); + } + + /** + * getLeaves returns all nodes that do not have a child + */ + public Set getLeaves() { + return new HashSet(leaves); + } + + /** + * getParents returns all the nodes with edges leading into work + */ + public List getParents(BaseWork work) { + assert invertedWorkGraph.containsKey(work) + && invertedWorkGraph.get(work) != null; + return new LinkedList(invertedWorkGraph.get(work)); + } + + /** + * getChildren returns all the nodes with edges leading out of work + */ + public List getChildren(BaseWork work) { + assert workGraph.containsKey(work) + && workGraph.get(work) != null; + return new LinkedList(workGraph.get(work)); + } + + /** + * remove removes a node from the graph and removes all edges with + * work as start or end point. No change to the graph if the node + * doesn't exist. 
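TezWork is a small mutable DAG over BaseWork nodes: roots and leaves are maintained incrementally as nodes and edges come and go, and getAllWork() returns the nodes topologically sorted, parents before children, which is what the DAG builder relies on. A usage sketch mirroring the unit test near the end of this patch:

    TezWork work = new TezWork();
    MapWork m1 = new MapWork("Map 1");
    MapWork m2 = new MapWork("Map 2");
    ReduceWork r1 = new ReduceWork("Reducer 1");

    work.addAll(new BaseWork[] { m1, m2, r1 });
    work.connect(m1, r1, TezWork.EdgeType.SIMPLE_EDGE);
    work.connect(m2, r1, TezWork.EdgeType.SIMPLE_EDGE);

    // roots: {Map 1, Map 2}; leaves: {Reducer 1}
    for (BaseWork w : work.getAllWork()) {       // both map works are listed before the reduce work
      System.out.println(w.getName() + ", parents: " + work.getParents(w).size());
    }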
+ */ + public void remove(BaseWork work) { + + if (!workGraph.containsKey(work)) { + return; + } + + List children = getChildren(work); + List parents = getParents(work); + + for (BaseWork w: children) { + invertedWorkGraph.get(w).remove(work); + if (invertedWorkGraph.get(w).size() == 0) { + roots.add(w); + } + } + + for (BaseWork w: parents) { + workGraph.get(w).remove(work); + if (workGraph.get(w).size() == 0) { + leaves.add(w); + } + } + + roots.remove(work); + leaves.remove(work); + + workGraph.remove(work); + invertedWorkGraph.remove(work); + } + + public EdgeType getEdgeProperty(BaseWork a, BaseWork b) { + return edgeProperties.get(new ImmutablePair(a,b)); + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java (working copy) @@ -44,6 +44,7 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.MapRedStats; import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.exec.tez.TezSessionState; import org.apache.hadoop.hive.ql.history.HiveHistory; import org.apache.hadoop.hive.ql.history.HiveHistoryImpl; import org.apache.hadoop.hive.ql.history.HiveHistoryProxyHandler; @@ -136,6 +137,8 @@ private Map> localMapRedErrors; + private TezSessionState tezSessionState; + private String currentDatabase; /** @@ -307,6 +310,19 @@ throw new RuntimeException(e); } + if (HiveConf.getBoolVar(startSs.getConf(), HiveConf.ConfVars.HIVE_OPTIMIZE_TEZ)) { + try { + if (startSs.tezSessionState == null) { + startSs.tezSessionState = new TezSessionState(); + } + startSs.tezSessionState.open(startSs.getSessionId(), startSs.conf); + } catch (Exception e) { + throw new RuntimeException(e); + } + } else { + LOG.info("No Tez session required at this point. 
hive.optimize.tez is false."); + } + return startSs; } @@ -813,6 +829,16 @@ } catch (IOException e) { LOG.info("Error removing session resource dir " + resourceDir, e); } + + try { + if (tezSessionState != null) { + tezSessionState.close(false); + } + } catch (Exception e) { + LOG.info("Error closing tez session", e); + } finally { + tezSessionState = null; + } } /** @@ -835,4 +861,11 @@ return perfLogger; } + public TezSessionState getTezSession() { + return tezSessionState; + } + + public void setTezSession(TezSessionState session) { + this.tezSessionState = session; + } } Index: ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java (working copy) @@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.mr.ExecDriver; +import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; import org.apache.hadoop.mapred.Counters; import org.apache.hadoop.mapred.JobClient; @@ -42,7 +43,7 @@ public boolean connect(Configuration hconf, Task sourceTask) { try { jc = new JobClient(toJobConf(hconf)); - RunningJob job = jc.getJob(sourceTask.getJobID()); + RunningJob job = jc.getJob(((MapRedTask)sourceTask).getJobID()); if (job != null) { counters = job.getCounters(); } Index: ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorTez.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorTez.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorTez.java (working copy) @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.stats; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.tez.TezTask; +import org.apache.tez.common.counters.TezCounters; + +public class CounterStatsAggregatorTez implements StatsAggregator { + + private static final Log LOG = LogFactory.getLog(CounterStatsAggregatorTez.class.getName()); + + private TezCounters counters; + private CounterStatsAggregator mrAggregator; + private boolean delegate; + + public CounterStatsAggregatorTez() { + mrAggregator = new CounterStatsAggregator(); + } + + @Override + public boolean connect(Configuration hconf, Task sourceTask) { + if (!(sourceTask instanceof TezTask)) { + delegate = true; + return mrAggregator.connect(hconf, sourceTask); + } + counters = ((TezTask) sourceTask).getTezCounters(); + return counters != null; + } + + @Override + public String aggregateStats(String keyPrefix, String statType) { + if (delegate) { + return mrAggregator.aggregateStats(keyPrefix, statType); + } + + long value = 0; + for (String groupName : counters.getGroupNames()) { + if (groupName.startsWith(keyPrefix)) { + value += counters.getGroup(groupName).findCounter(statType).getValue(); + } + } + return String.valueOf(value); + } + + @Override + public boolean closeConnection() { + return true; + } + + @Override + public boolean cleanUp(String keyPrefix) { + return true; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java (working copy) @@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; /** * An interface for any possible implementation for gathering statistics. 
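CounterStatsAggregatorTez either delegates to the MapReduce aggregator (when the source task is not a TezTask) or sums, over every counter group whose name starts with the published key prefix, the counter named after the requested statistic. A hedged usage sketch; the prefix and statistic name are illustrative and would normally come from the stats publisher and StatsSetupConst:

    StatsAggregator aggregator = new CounterStatsAggregatorTez();
    if (aggregator.connect(conf, sourceTask)) {   // sourceTask: the TezTask (or MR task) that produced the stats
      String rowCount = aggregator.aggregateStats(keyPrefix, "numRows");
      aggregator.closeConnection();
    }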
Index: ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsAggregator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsAggregator.java (revision 1553449) +++ ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsAggregator.java (working copy) @@ -33,7 +33,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.stats.StatsAggregator; @@ -42,7 +41,7 @@ private Connection conn; private String connectionString; private Configuration hiveconf; - private MapRedTask sourceTask; + private Task sourceTask; private final Map columnMapping; private final Log LOG = LogFactory.getLog(this.getClass().getName()); private int timeout = 30; Index: ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java (revision 1553449) +++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java (working copy) @@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.io.IOContext; import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory; import org.apache.hadoop.hive.ql.plan.CollectDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; @@ -313,6 +314,7 @@ Configuration hconf = new JobConf(TestOperators.class); HiveConf.setVar(hconf, HiveConf.ConfVars.HADOOPMAPFILENAME, "hdfs:///testDir/testFile"); + IOContext.get().setInputPath(new Path("hdfs:///testDir/testFile")); // initialize pathToAliases ArrayList aliases = new ArrayList(); Index: ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java (revision 0) +++ ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java (working copy) @@ -0,0 +1,217 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.exec.tez; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; + +import javax.security.auth.login.LoginException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.BaseWork; +import org.apache.hadoop.hive.ql.plan.MapWork; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.ql.plan.ReduceWork; +import org.apache.hadoop.hive.ql.plan.TezWork; +import org.apache.hadoop.hive.ql.plan.TezWork.EdgeType; +import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.tez.client.TezSession; +import org.apache.tez.dag.api.DAG; +import org.apache.tez.dag.api.Edge; +import org.apache.tez.dag.api.EdgeProperty; +import org.apache.tez.dag.api.ProcessorDescriptor; +import org.apache.tez.dag.api.SessionNotRunning; +import org.apache.tez.dag.api.TezException; +import org.apache.tez.dag.api.Vertex; +import org.apache.tez.dag.api.client.DAGClient; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +public class TestTezTask { + + DagUtils utils; + MapWork[] mws; + ReduceWork[] rws; + TezWork work; + TezTask task; + TezSession session; + TezSessionState sessionState; + JobConf conf; + LocalResource appLr; + Operator op; + Path path; + FileSystem fs; + + @SuppressWarnings("unchecked") + @Before + public void setUp() throws Exception { + utils = mock(DagUtils.class); + fs = mock(FileSystem.class); + path = mock(Path.class); + when(path.getFileSystem(any(Configuration.class))).thenReturn(fs); + when(utils.getTezDir(any(Path.class))).thenReturn(path); + when(utils.createVertex(any(JobConf.class), any(BaseWork.class), any(Path.class), any(LocalResource.class), + any(List.class), any(FileSystem.class), any(Context.class), anyBoolean())).thenAnswer(new Answer() { + + @Override + public Vertex answer(InvocationOnMock invocation) throws Throwable { + Object[] args = invocation.getArguments(); + return new Vertex(((BaseWork)args[1]).getName(), + mock(ProcessorDescriptor.class), 0, mock(Resource.class)); + } + }); + + when(utils.createEdge(any(JobConf.class), any(Vertex.class), any(JobConf.class), + any(Vertex.class), any(EdgeType.class))).thenAnswer(new Answer() { + + @Override + public Edge answer(InvocationOnMock invocation) throws Throwable { + Object[] args = invocation.getArguments(); + return new Edge((Vertex)args[1], (Vertex)args[3], mock(EdgeProperty.class)); + } + }); + + work = new TezWork(); + + mws = new MapWork[] { new 
MapWork(), new MapWork()}; + rws = new ReduceWork[] { new ReduceWork(), new ReduceWork() }; + + work.addAll(mws); + work.addAll(rws); + + int i = 0; + for (BaseWork w: work.getAllWork()) { + w.setName("Work "+(++i)); + } + + op = mock(Operator.class); + + LinkedHashMap> map + = new LinkedHashMap>(); + map.put("foo", op); + mws[0].setAliasToWork(map); + mws[1].setAliasToWork(map); + + LinkedHashMap> pathMap + = new LinkedHashMap>(); + ArrayList aliasList = new ArrayList(); + aliasList.add("foo"); + pathMap.put("foo", aliasList); + + mws[0].setPathToAliases(pathMap); + mws[1].setPathToAliases(pathMap); + + rws[0].setReducer(op); + rws[1].setReducer(op); + + work.connect(mws[0], rws[0], EdgeType.SIMPLE_EDGE); + work.connect(mws[1], rws[0], EdgeType.SIMPLE_EDGE); + work.connect(rws[0], rws[1], EdgeType.SIMPLE_EDGE); + + task = new TezTask(utils); + task.setWork(work); + task.setConsole(mock(LogHelper.class)); + + conf = new JobConf(); + appLr = mock(LocalResource.class); + + session = mock(TezSession.class); + sessionState = mock(TezSessionState.class); + when(sessionState.getSession()).thenReturn(session); + when(session.submitDAG(any(DAG.class))).thenThrow(new SessionNotRunning("")) + .thenReturn(mock(DAGClient.class)); + } + + @After + public void tearDown() throws Exception { + utils = null; + work = null; + task = null; + path = null; + fs = null; + } + + @Test + public void testBuildDag() throws IllegalArgumentException, IOException, Exception { + DAG dag = task.build(conf, work, path, appLr, new Context(conf)); + for (BaseWork w: work.getAllWork()) { + Vertex v = dag.getVertex(w.getName()); + assertNotNull(v); + List outs = v.getOutputVertices(); + for (BaseWork x: work.getChildren(w)) { + boolean found = false; + for (Vertex u: outs) { + if (u.getVertexName().equals(x.getName())) { + found = true; + break; + } + } + assertTrue(found); + } + } + } + + @Test + public void testEmptyWork() throws IllegalArgumentException, IOException, Exception { + DAG dag = task.build(conf, new TezWork(), path, appLr, new Context(conf)); + assertEquals(dag.getVertices().size(), 0); + } + + @Test + public void testSubmit() throws LoginException, IllegalArgumentException, + IOException, TezException, InterruptedException, URISyntaxException, HiveException { + DAG dag = new DAG("test"); + task.submit(conf, dag, path, appLr, sessionState); + // validate close/reopen + verify(sessionState, times(1)).open(any(String.class), any(HiveConf.class)); + verify(sessionState, times(1)).close(eq(true)); + verify(session, times(2)).submitDAG(any(DAG.class)); + } + + @Test + public void testClose() throws HiveException { + task.close(work, 0); + verify(op, times(4)).jobClose(any(Configuration.class), eq(true)); + } +} Index: ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java (revision 1553449) +++ ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java (working copy) @@ -19,6 +19,7 @@ import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.DataInput; import java.io.DataOutput; @@ -425,8 +426,8 @@ OrcInputFormat.Context context = new OrcInputFormat.Context(conf); OrcInputFormat.SplitGenerator splitter = new OrcInputFormat.SplitGenerator(context, fs, - fs.getFileStatus(new Path("/a/file"))); - splitter.createSplit(0, 200); + fs.getFileStatus(new 
Path("/a/file")), null); + splitter.createSplit(0, 200, null); FileSplit result = context.getResult(-1); assertEquals(0, result.getStart()); assertEquals(200, result.getLength()); @@ -436,7 +437,7 @@ assertEquals("host1-1", locs[0]); assertEquals("host1-2", locs[1]); assertEquals("host1-3", locs[2]); - splitter.createSplit(500, 600); + splitter.createSplit(500, 600, null); result = context.getResult(-1); locs = result.getLocations(); assertEquals(3, locs.length); @@ -443,7 +444,7 @@ assertEquals("host2-1", locs[0]); assertEquals("host0", locs[1]); assertEquals("host2-3", locs[2]); - splitter.createSplit(0, 2500); + splitter.createSplit(0, 2500, null); result = context.getResult(-1); locs = result.getLocations(); assertEquals(1, locs.length); @@ -468,7 +469,7 @@ OrcInputFormat.Context context = new OrcInputFormat.Context(conf); OrcInputFormat.SplitGenerator splitter = new OrcInputFormat.SplitGenerator(context, fs, - fs.getFileStatus(new Path("/a/file"))); + fs.getFileStatus(new Path("/a/file")), null); splitter.run(); if (context.getErrors().size() > 0) { for(Throwable th: context.getErrors()) { @@ -496,7 +497,7 @@ conf.setInt(OrcInputFormat.MAX_SPLIT_SIZE, 0); context = new OrcInputFormat.Context(conf); splitter = new OrcInputFormat.SplitGenerator(context, fs, - fs.getFileStatus(new Path("/a/file"))); + fs.getFileStatus(new Path("/a/file")), null); splitter.run(); if (context.getErrors().size() > 0) { for(Throwable th: context.getErrors()) { @@ -562,7 +563,6 @@ IntObjectInspector intInspector = (IntObjectInspector) fields.get(0).getFieldObjectInspector(); assertEquals(0.0, reader.getProgress(), 0.00001); - assertEquals(3, reader.getPos()); while (reader.next(key, value)) { assertEquals(++rowNum, intInspector.get(inspector. getStructFieldData(serde.deserialize(value), fields.get(0)))); @@ -697,7 +697,7 @@ InputFormat in = new OrcInputFormat(); FileInputFormat.setInputPaths(conf, testFilePath.toString()); InputSplit[] splits = in.getSplits(conf, 1); - assertEquals(0, splits.length); + assertTrue(1 == splits.length); assertEquals(null, serde.getSerDeStats()); } Index: ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java (revision 0) +++ ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java (working copy) @@ -0,0 +1,171 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.parse; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.FileSinkOperator; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; +import org.apache.hadoop.hive.ql.exec.TableScanOperator; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.hooks.ReadEntity; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.plan.BaseWork; +import org.apache.hadoop.hive.ql.plan.FileSinkDesc; +import org.apache.hadoop.hive.ql.plan.MapWork; +import org.apache.hadoop.hive.ql.plan.MoveWork; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; +import org.apache.hadoop.hive.ql.plan.ReduceWork; +import org.apache.hadoop.hive.ql.plan.TableScanDesc; +import org.apache.hadoop.hive.ql.plan.TezWork; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/** + * Tests for GenTezWork. + * + */ +public class TestGenTezWork { + + GenTezProcContext ctx; + GenTezWork proc; + ReduceSinkOperator rs; + FileSinkOperator fs; + TableScanOperator ts; + + /** + * @throws java.lang.Exception + */ + @SuppressWarnings("unchecked") + @Before + public void setUp() throws Exception { + ctx = new GenTezProcContext( + new HiveConf(), + new ParseContext(), + (List<Task<MoveWork>>)Collections.EMPTY_LIST, + (List<Task<? extends Serializable>>) new ArrayList<Task<? extends Serializable>>(), + (Set<ReadEntity>)Collections.EMPTY_SET, + (Set<WriteEntity>)Collections.EMPTY_SET); + + proc = new GenTezWork() { + @Override + protected void setupMapWork(MapWork mapWork, GenTezProcContext context, + Operator<? extends OperatorDesc> root, String alias) throws SemanticException { + LinkedHashMap<String, Operator<? extends OperatorDesc>> map + = new LinkedHashMap<String, Operator<? extends OperatorDesc>>(); + map.put("foo", root); + mapWork.setAliasToWork(map); + return; + } + }; + + fs = new FileSinkOperator(); + fs.setConf(new FileSinkDesc()); + rs = new ReduceSinkOperator(); + rs.setConf(new ReduceSinkDesc()); + ts = new TableScanOperator(); + ts.setConf(new TableScanDesc()); + ts.getChildOperators().add(rs); + rs.getParentOperators().add(ts); + rs.getChildOperators().add(fs); + fs.getParentOperators().add(rs); + ctx.preceedingWork = null; + ctx.currentRootOperator = ts; + } + + /** + * @throws java.lang.Exception + */ + @After + public void tearDown() throws Exception { + ctx = null; + proc = null; + ts = null; + rs = null; + fs = null; + } + + @Test + public void testCreateMap() throws SemanticException { + proc.process(rs, null, ctx, (Object[])null); + + assertNotNull(ctx.currentTask); + assertTrue(ctx.rootTasks.contains(ctx.currentTask)); + + TezWork work = ctx.currentTask.getWork(); + assertEquals(work.getAllWork().size(),1); + + BaseWork w = work.getAllWork().get(0); + assertTrue(w instanceof MapWork); + + MapWork mw = (MapWork)w; + + // need to make sure names are set for tez to connect things right + assertNotNull(w.getName()); + + // map work should start with our ts op + assertSame(mw.getAliasToWork().entrySet().iterator().next().getValue(),ts); + + // preceeding work must be set to the newly generated map + assertSame(ctx.preceedingWork, mw); + + // should have a new root now + assertSame(ctx.currentRootOperator, fs); + } + + 
@Test + public void testCreateReduce() throws SemanticException { + // create map + proc.process(rs, null, ctx, (Object[])null); + + // create reduce + proc.process(fs, null, ctx, (Object[])null); + + TezWork work = ctx.currentTask.getWork(); + assertEquals(work.getAllWork().size(),2); + + BaseWork w = work.getAllWork().get(1); + assertTrue(w instanceof ReduceWork); + assertTrue(work.getParents(w).contains(work.getAllWork().get(0))); + + ReduceWork rw = (ReduceWork)w; + + // need to make sure names are set for tez to connect things right + assertNotNull(w.getName()); + + // the new reduce work should have our fs op as its reducer + assertSame(rw.getReducer(),fs); + + // should have severed the ties + assertEquals(fs.getParentOperators().size(),0); + } +} Index: ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java (revision 0) +++ ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java (working copy) @@ -0,0 +1,154 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.plan; + +import java.util.LinkedList; +import java.util.List; + +import junit.framework.Assert; + +import org.apache.hadoop.hive.ql.plan.TezWork.EdgeType; +import org.junit.Before; +import org.junit.Test; + +public class TestTezWork { + + private List nodes; + private TezWork work; + + @Before + public void setup() throws Exception { + nodes = new LinkedList(); + work = new TezWork(); + addWork(5); + } + + private void addWork(int n) { + for (int i = 0; i < n; ++i) { + BaseWork w = new MapWork(); + nodes.add(w); + work.add(w); + } + } + + @Test + public void testAdd() throws Exception { + Assert.assertEquals(work.getAllWork().size(), nodes.size()); + Assert.assertEquals(work.getRoots().size(), nodes.size()); + Assert.assertEquals(work.getLeaves().size(), nodes.size()); + for (BaseWork w: nodes) { + Assert.assertEquals(work.getParents(w).size(), 0); + Assert.assertEquals(work.getChildren(w).size(), 0); + } + } + + @Test + public void testConnect() throws Exception { + BaseWork parent = nodes.get(0); + BaseWork child = nodes.get(1); + + work.connect(parent, child, EdgeType.SIMPLE_EDGE); + + Assert.assertEquals(work.getParents(child).size(), 1); + Assert.assertEquals(work.getChildren(parent).size(), 1); + Assert.assertEquals(work.getChildren(parent).get(0), child); + Assert.assertEquals(work.getParents(child).get(0), parent); + Assert.assertTrue(work.getRoots().contains(parent) && !work.getRoots().contains(child)); + Assert.assertTrue(!work.getLeaves().contains(parent) && work.getLeaves().contains(child)); + for (BaseWork w: nodes) { + if (w == parent || w == child) { + continue; + } + Assert.assertEquals(work.getParents(w).size(), 0); + Assert.assertEquals(work.getChildren(w).size(), 0); + } + + Assert.assertEquals(work.getEdgeProperty(parent, child), EdgeType.SIMPLE_EDGE); + } + + @Test + public void testBroadcastConnect() throws Exception { + BaseWork parent = nodes.get(0); + BaseWork child = nodes.get(1); + + work.connect(parent, child, EdgeType.BROADCAST_EDGE); + + Assert.assertEquals(work.getParents(child).size(), 1); + Assert.assertEquals(work.getChildren(parent).size(), 1); + Assert.assertEquals(work.getChildren(parent).get(0), child); + Assert.assertEquals(work.getParents(child).get(0), parent); + Assert.assertTrue(work.getRoots().contains(parent) && !work.getRoots().contains(child)); + Assert.assertTrue(!work.getLeaves().contains(parent) && work.getLeaves().contains(child)); + for (BaseWork w: nodes) { + if (w == parent || w == child) { + continue; + } + Assert.assertEquals(work.getParents(w).size(), 0); + Assert.assertEquals(work.getChildren(w).size(), 0); + } + + Assert.assertEquals(work.getEdgeProperty(parent, child), EdgeType.BROADCAST_EDGE); + } + + @Test + public void testDisconnect() throws Exception { + BaseWork parent = nodes.get(0); + BaseWork children[] = {nodes.get(1), nodes.get(2)}; + + work.connect(parent, children[0], EdgeType.SIMPLE_EDGE); + work.connect(parent, children[1], EdgeType.SIMPLE_EDGE); + + work.disconnect(parent, children[0]); + + Assert.assertTrue(work.getChildren(parent).contains(children[1])); + Assert.assertTrue(!work.getChildren(parent).contains(children[0])); + Assert.assertTrue(work.getRoots().contains(parent) && work.getRoots().contains(children[0]) + && !work.getRoots().contains(children[1])); + Assert.assertTrue(!work.getLeaves().contains(parent) && work.getLeaves().contains(children[0]) + && work.getLeaves().contains(children[1])); + } + + @Test + public void testRemove() throws Exception { + BaseWork 
parent = nodes.get(0); + BaseWork children[] = {nodes.get(1), nodes.get(2)}; + + work.connect(parent, children[0], EdgeType.SIMPLE_EDGE); + work.connect(parent, children[1], EdgeType.SIMPLE_EDGE); + + work.remove(parent); + + Assert.assertEquals(work.getParents(children[0]).size(), 0); + Assert.assertEquals(work.getParents(children[1]).size(), 0); + Assert.assertEquals(work.getAllWork().size(), nodes.size()-1); + Assert.assertEquals(work.getRoots().size(), nodes.size()-1); + Assert.assertEquals(work.getLeaves().size(), nodes.size()-1); + } + + @Test + public void testGetAllWork() throws Exception { + for (int i = 4; i > 0; --i) { + work.connect(nodes.get(i), nodes.get(i-1), EdgeType.SIMPLE_EDGE); + } + + List sorted = work.getAllWork(); + for (int i = 0; i < 5; ++i) { + Assert.assertEquals(sorted.get(i), nodes.get(4-i)); + } + } +} Index: ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java (revision 1553449) +++ ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java (working copy) @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.session; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.MetaStoreUtils; @@ -57,5 +58,11 @@ } - + @Test + public void testClose() throws Exception { + SessionState ss = SessionState.get(); + assertNull(ss.getTezSession()); + ss.close(); + assertNull(ss.getTezSession()); + } } Index: ql/src/test/queries/clientpositive/mrr.q =================================================================== --- ql/src/test/queries/clientpositive/mrr.q (revision 0) +++ ql/src/test/queries/clientpositive/mrr.q (working copy) @@ -0,0 +1,61 @@ +set hive.optimize.tez=true; + +-- simple query with multiple reduce stages +EXPLAIN SELECT key, count(value) as cnt FROM src GROUP BY key ORDER BY cnt; +SELECT key, count(value) as cnt FROM src GROUP BY key ORDER BY cnt; + +set hive.auto.convert.join=false; +-- join query with multiple reduce stages; +EXPLAIN SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt; +SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt; + +set hive.auto.convert.join=true; +-- same query with broadcast join +EXPLAIN SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt; +SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt; + +set hive.auto.convert.join=false; +-- query with multiple branches in the task dag +EXPLAIN +SELECT * +FROM + (SELECT key, count(value) as cnt + FROM src GROUP BY key ORDER BY cnt) s1 + JOIN + (SELECT key, count(value) as cnt + FROM src GROUP BY key ORDER BY cnt) s2 + JOIN + (SELECT key, count(value) as cnt + FROM src GROUP BY key ORDER BY cnt) s3 + ON (s1.key = s2.key and s1.key = s3.key) +WHERE + s1.cnt > 1 +ORDER BY s1.key; + +SELECT * +FROM + (SELECT key, count(value) as cnt + FROM src GROUP BY key ORDER BY cnt) s1 + JOIN + (SELECT key, count(value) as cnt + FROM src GROUP BY key ORDER BY cnt) s2 + JOIN + (SELECT key, count(value) as cnt + FROM src GROUP BY key ORDER BY cnt) s3 + ON (s1.key = s2.key and s1.key = s3.key) +WHERE + s1.cnt > 1 +ORDER BY s1.key; + +set 
hive.auto.convert.join=true; +-- query with broadcast join in the reduce stage +EXPLAIN +SELECT * +FROM + (SELECT key, count(value) as cnt FROM src GROUP BY key) s1 + JOIN src ON (s1.key = src.key); + +SELECT * +FROM + (SELECT key, count(value) as cnt FROM src GROUP BY key) s1 + JOIN src ON (s1.key = src.key); Index: ql/src/test/queries/clientpositive/tez_dml.q =================================================================== --- ql/src/test/queries/clientpositive/tez_dml.q (revision 0) +++ ql/src/test/queries/clientpositive/tez_dml.q (working copy) @@ -0,0 +1,37 @@ +set hive.optimize.tez=true; +set hive.exec.dynamic.partition.mode=nonstrict; + +-- CTAS +EXPLAIN CREATE TABLE tmp_src AS SELECT * FROM (SELECT value, count(value) AS cnt FROM src GROUP BY value) f1 ORDER BY cnt; +CREATE TABLE tmp_src AS SELECT * FROM (SELECT value, count(value) AS cnt FROM src GROUP BY value) f1 ORDER BY cnt; + +SELECT * FROM tmp_src; + +-- dyn partitions +CREATE TABLE tmp_src_part (c string) PARTITIONED BY (d int); +EXPLAIN INSERT INTO TABLE tmp_src_part PARTITION (d) SELECT * FROM tmp_src; +INSERT INTO TABLE tmp_src_part PARTITION (d) SELECT * FROM tmp_src; + +SELECT * FROM tmp_src_part; + +-- multi insert +CREATE TABLE even (c int, d string); +CREATE TABLE odd (c int, d string); + +EXPLAIN +FROM src +INSERT INTO TABLE even SELECT key, value WHERE key % 2 = 0 +INSERT INTO TABLE odd SELECT key, value WHERE key % 2 = 1; + +FROM src +INSERT INTO TABLE even SELECT key, value WHERE key % 2 = 0 +INSERT INTO TABLE odd SELECT key, value WHERE key % 2 = 1; + +SELECT * FROM even; +SELECT * FROM odd; + +-- drop the tables +DROP TABLE even; +DROP TABLE odd; +DROP TABLE tmp_src; +DROP TABLE tmp_src_part; Index: ql/src/test/queries/clientpositive/tez_insert_overwrite_local_directory_1.q =================================================================== --- ql/src/test/queries/clientpositive/tez_insert_overwrite_local_directory_1.q (revision 0) +++ ql/src/test/queries/clientpositive/tez_insert_overwrite_local_directory_1.q (working copy) @@ -0,0 +1,7 @@ +set hive.optimize.tez=true; + +insert overwrite local directory '${system:test.tmp.dir}/tez_local_src_table_1' +select * from src order by key limit 10 ; +dfs -cat file:${system:test.tmp.dir}/tez_local_src_table_1/000000_0 ; + +dfs -rmr file:${system:test.tmp.dir}/tez_local_src_table_1/ ; Index: ql/src/test/queries/clientpositive/tez_join_tests.q =================================================================== --- ql/src/test/queries/clientpositive/tez_join_tests.q (revision 0) +++ ql/src/test/queries/clientpositive/tez_join_tests.q (working copy) @@ -0,0 +1,13 @@ +set hive.optimize.tez=true; +explain +select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key; + +select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key; +select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key)) x right outer join src c on (x.value = c.value) order by x.key; +select * from src1 a left outer join src b on (a.key = b.key) right outer join src c on (a.value = c.value) order by a.key; +select * from src1 a left outer join src b on (a.key = b.key) left outer join src c on (a.value = c.value) order by a.key; +select * from src1 a left outer join src b on (a.key = b.key) join src c on (a.key = c.key); +select * from src1 a join src b on 
(a.key = b.key) join src c on (a.key = c.key); + +select count(*) from src1 a join src b on (a.key = b.key) join src c on (a.key = c.key); + Index: ql/src/test/queries/clientpositive/tez_joins_explain.q =================================================================== --- ql/src/test/queries/clientpositive/tez_joins_explain.q (revision 0) +++ ql/src/test/queries/clientpositive/tez_joins_explain.q (working copy) @@ -0,0 +1,6 @@ +set hive.optimize.tez=true; +explain +select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key; + +select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key; + Index: ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out (revision 1553449) +++ ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out (working copy) @@ -174,6 +174,8 @@ 0 [Column[key]] 1 [Column[key]] Position of Big Table: 1 + Statistics: + numRows: 127 dataSize: 12786 basicStatsState: COMPLETE colStatsState: NONE Select Operator Statistics: numRows: 127 dataSize: 12786 basicStatsState: COMPLETE colStatsState: NONE @@ -498,6 +500,8 @@ 0 [Column[key]] 1 [Column[key]] Position of Big Table: 1 + Statistics: + numRows: 127 dataSize: 12786 basicStatsState: COMPLETE colStatsState: NONE Select Operator Statistics: numRows: 127 dataSize: 12786 basicStatsState: COMPLETE colStatsState: NONE Index: ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out (revision 1553449) +++ ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out (working copy) @@ -334,6 +334,8 @@ 1 [Column[key]] 2 [Column[key]] Position of Big Table: 2 + Statistics: + numRows: 255 dataSize: 25572 basicStatsState: COMPLETE colStatsState: NONE Map Join Operator condition map: Inner Join 0 to 1 @@ -345,6 +347,8 @@ 0 [] 1 [] Position of Big Table: 0 + Statistics: + numRows: 280 dataSize: 28129 basicStatsState: COMPLETE colStatsState: NONE Select Operator Statistics: numRows: 280 dataSize: 28129 basicStatsState: COMPLETE colStatsState: NONE Index: ql/src/test/results/clientpositive/join32.q.out =================================================================== --- ql/src/test/results/clientpositive/join32.q.out (revision 1553449) +++ ql/src/test/results/clientpositive/join32.q.out (working copy) @@ -133,6 +133,8 @@ 1 [Column[key]] outputColumnNames: _col0, _col1, _col5 Position of Big Table: 1 + Statistics: + numRows: 31 dataSize: 6393 basicStatsState: COMPLETE colStatsState: NONE Map Join Operator condition map: Inner Join 0 to 1 @@ -145,6 +147,8 @@ 1 [Column[value]] outputColumnNames: _col1, _col4, _col9 Position of Big Table: 0 + Statistics: + numRows: 63 dataSize: 6393 basicStatsState: COMPLETE colStatsState: NONE Select Operator expressions: expr: _col4 Index: ql/src/test/results/clientpositive/join32_lessSize.q.out =================================================================== --- ql/src/test/results/clientpositive/join32_lessSize.q.out (revision 1553449) +++ ql/src/test/results/clientpositive/join32_lessSize.q.out (working copy) @@ -80,6 +80,8 @@ 1 [Column[key]] outputColumnNames: _col0, _col1, _col5 Position of Big 
Table: 1 + Statistics: + numRows: 31 dataSize: 6393 basicStatsState: COMPLETE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 @@ -272,6 +274,8 @@ 1 [Column[value]] outputColumnNames: _col1, _col4, _col9 Position of Big Table: 0 + Statistics: + numRows: 63 dataSize: 6393 basicStatsState: COMPLETE colStatsState: NONE Select Operator expressions: expr: _col4 @@ -603,6 +607,8 @@ 1 [Column[value]] outputColumnNames: _col4 Position of Big Table: 0 + Statistics: + numRows: 63 dataSize: 6393 basicStatsState: COMPLETE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 @@ -1807,6 +1813,8 @@ 1 [Column[key]] outputColumnNames: _col0, _col1 Position of Big Table: 1 + Statistics: + numRows: 63 dataSize: 6393 basicStatsState: COMPLETE colStatsState: NONE Select Operator expressions: expr: _col0 @@ -2008,6 +2016,8 @@ 1 [Column[value]] outputColumnNames: _col0, _col1, _col3 Position of Big Table: 0 + Statistics: + numRows: 69 dataSize: 7032 basicStatsState: COMPLETE colStatsState: NONE Select Operator expressions: expr: _col0 @@ -2349,6 +2359,8 @@ 1 [Column[key]] outputColumnNames: _col0, _col1 Position of Big Table: 0 + Statistics: + numRows: 63 dataSize: 6393 basicStatsState: COMPLETE colStatsState: NONE Select Operator expressions: expr: _col0 @@ -2550,6 +2562,8 @@ 1 [Column[value]] outputColumnNames: _col0, _col1, _col3 Position of Big Table: 0 + Statistics: + numRows: 69 dataSize: 7032 basicStatsState: COMPLETE colStatsState: NONE Select Operator expressions: expr: _col0 Index: ql/src/test/results/clientpositive/join33.q.out =================================================================== --- ql/src/test/results/clientpositive/join33.q.out (revision 1553449) +++ ql/src/test/results/clientpositive/join33.q.out (working copy) @@ -133,6 +133,8 @@ 1 [Column[key]] outputColumnNames: _col0, _col1, _col5 Position of Big Table: 1 + Statistics: + numRows: 31 dataSize: 6393 basicStatsState: COMPLETE colStatsState: NONE Map Join Operator condition map: Inner Join 0 to 1 @@ -145,6 +147,8 @@ 1 [Column[value]] outputColumnNames: _col1, _col4, _col9 Position of Big Table: 0 + Statistics: + numRows: 63 dataSize: 6393 basicStatsState: COMPLETE colStatsState: NONE Select Operator expressions: expr: _col4 Index: ql/src/test/results/clientpositive/join34.q.out =================================================================== --- ql/src/test/results/clientpositive/join34.q.out (revision 1553449) +++ ql/src/test/results/clientpositive/join34.q.out (working copy) @@ -100,6 +100,8 @@ 1 [Column[key]] outputColumnNames: _col1, _col2, _col3 Position of Big Table: 0 + Statistics: + numRows: 19 dataSize: 3966 basicStatsState: COMPLETE colStatsState: NONE Select Operator expressions: expr: _col2 @@ -174,6 +176,8 @@ 1 [Column[key]] outputColumnNames: _col1, _col2, _col3 Position of Big Table: 0 + Statistics: + numRows: 19 dataSize: 3966 basicStatsState: COMPLETE colStatsState: NONE Select Operator expressions: expr: _col2 Index: ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out =================================================================== --- ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out (revision 1553449) +++ ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out (working copy) @@ -184,16 +184,16 @@ STAGE DEPENDENCIES: Stage-2 is a root stage - Stage-8 depends on stages: Stage-2 , consists of Stage-5, Stage-4, Stage-6 - Stage-5 - Stage-9 depends on stages: Stage-5, 
Stage-4, Stage-7, Stage-12, Stage-11, Stage-14 - Stage-0 depends on stages: Stage-9 + Stage-9 depends on stages: Stage-2 , consists of Stage-6, Stage-5, Stage-7 + Stage-6 + Stage-4 depends on stages: Stage-6, Stage-5, Stage-8, Stage-12, Stage-11, Stage-14 + Stage-0 depends on stages: Stage-4 Stage-3 depends on stages: Stage-0 - Stage-1 depends on stages: Stage-9 + Stage-1 depends on stages: Stage-4 Stage-10 depends on stages: Stage-1 - Stage-4 - Stage-6 - Stage-7 depends on stages: Stage-6 + Stage-5 + Stage-7 + Stage-8 depends on stages: Stage-7 Stage-15 depends on stages: Stage-2 , consists of Stage-12, Stage-11, Stage-13 Stage-12 Stage-11 @@ -246,16 +246,16 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi2 - Stage: Stage-8 + Stage: Stage-9 Conditional Operator - Stage: Stage-5 + Stage: Stage-6 Move Operator files: hdfs directory: true #### A masked pattern was here #### - Stage: Stage-9 + Stage: Stage-4 Dependency Collection Stage: Stage-0 @@ -284,7 +284,7 @@ Stage: Stage-10 Stats-Aggr Operator - Stage: Stage-4 + Stage: Stage-5 Map Reduce Alias -> Map Operator Tree: #### A masked pattern was here #### @@ -298,7 +298,7 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi1 - Stage: Stage-6 + Stage: Stage-7 Map Reduce Alias -> Map Operator Tree: #### A masked pattern was here #### @@ -312,7 +312,7 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi1 - Stage: Stage-7 + Stage: Stage-8 Move Operator files: hdfs directory: true @@ -650,16 +650,16 @@ STAGE DEPENDENCIES: Stage-2 is a root stage - Stage-8 depends on stages: Stage-2 , consists of Stage-5, Stage-4, Stage-6 - Stage-5 - Stage-9 depends on stages: Stage-5, Stage-4, Stage-7, Stage-12, Stage-11, Stage-14 - Stage-0 depends on stages: Stage-9 + Stage-9 depends on stages: Stage-2 , consists of Stage-6, Stage-5, Stage-7 + Stage-6 + Stage-4 depends on stages: Stage-6, Stage-5, Stage-8, Stage-12, Stage-11, Stage-14 + Stage-0 depends on stages: Stage-4 Stage-3 depends on stages: Stage-0 - Stage-1 depends on stages: Stage-9 + Stage-1 depends on stages: Stage-4 Stage-10 depends on stages: Stage-1 - Stage-4 - Stage-6 - Stage-7 depends on stages: Stage-6 + Stage-5 + Stage-7 + Stage-8 depends on stages: Stage-7 Stage-15 depends on stages: Stage-2 , consists of Stage-12, Stage-11, Stage-13 Stage-12 Stage-11 @@ -712,16 +712,16 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi2 - Stage: Stage-8 + Stage: Stage-9 Conditional Operator - Stage: Stage-5 + Stage: Stage-6 Move Operator files: hdfs directory: true #### A masked pattern was here #### - Stage: Stage-9 + Stage: Stage-4 Dependency Collection Stage: Stage-0 @@ -750,7 +750,7 @@ Stage: Stage-10 Stats-Aggr Operator - Stage: Stage-4 + Stage: Stage-5 Map Reduce Alias -> Map Operator Tree: #### A masked pattern was here #### @@ -764,7 +764,7 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi1 - Stage: Stage-6 + Stage: Stage-7 Map Reduce Alias -> Map Operator Tree: #### A masked pattern was here #### @@ -778,7 +778,7 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi1 - Stage: Stage-7 + Stage: Stage-8 Move Operator files: hdfs directory: true @@ -1201,16 +1201,16 @@ STAGE DEPENDENCIES: Stage-2 is a root stage - Stage-8 depends on stages: Stage-2 , consists of Stage-5, Stage-4, Stage-6 - Stage-5 - Stage-9 depends on stages: Stage-5, Stage-4, Stage-7, Stage-12, Stage-11, Stage-14 - Stage-0 depends on 
stages: Stage-9 + Stage-9 depends on stages: Stage-2 , consists of Stage-6, Stage-5, Stage-7 + Stage-6 + Stage-4 depends on stages: Stage-6, Stage-5, Stage-8, Stage-12, Stage-11, Stage-14 + Stage-0 depends on stages: Stage-4 Stage-3 depends on stages: Stage-0 - Stage-1 depends on stages: Stage-9 + Stage-1 depends on stages: Stage-4 Stage-10 depends on stages: Stage-1 - Stage-4 - Stage-6 - Stage-7 depends on stages: Stage-6 + Stage-5 + Stage-7 + Stage-8 depends on stages: Stage-7 Stage-15 depends on stages: Stage-2 , consists of Stage-12, Stage-11, Stage-13 Stage-12 Stage-11 @@ -1307,16 +1307,16 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi2 - Stage: Stage-8 + Stage: Stage-9 Conditional Operator - Stage: Stage-5 + Stage: Stage-6 Move Operator files: hdfs directory: true #### A masked pattern was here #### - Stage: Stage-9 + Stage: Stage-4 Dependency Collection Stage: Stage-0 @@ -1345,7 +1345,7 @@ Stage: Stage-10 Stats-Aggr Operator - Stage: Stage-4 + Stage: Stage-5 Map Reduce Alias -> Map Operator Tree: #### A masked pattern was here #### @@ -1359,7 +1359,7 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi1 - Stage: Stage-6 + Stage: Stage-7 Map Reduce Alias -> Map Operator Tree: #### A masked pattern was here #### @@ -1373,7 +1373,7 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi1 - Stage: Stage-7 + Stage: Stage-8 Move Operator files: hdfs directory: true @@ -1789,16 +1789,16 @@ STAGE DEPENDENCIES: Stage-2 is a root stage - Stage-8 depends on stages: Stage-2 , consists of Stage-5, Stage-4, Stage-6 - Stage-5 - Stage-9 depends on stages: Stage-5, Stage-4, Stage-7, Stage-12, Stage-11, Stage-14 - Stage-0 depends on stages: Stage-9 + Stage-9 depends on stages: Stage-2 , consists of Stage-6, Stage-5, Stage-7 + Stage-6 + Stage-4 depends on stages: Stage-6, Stage-5, Stage-8, Stage-12, Stage-11, Stage-14 + Stage-0 depends on stages: Stage-4 Stage-3 depends on stages: Stage-0 - Stage-1 depends on stages: Stage-9 + Stage-1 depends on stages: Stage-4 Stage-10 depends on stages: Stage-1 - Stage-4 - Stage-6 - Stage-7 depends on stages: Stage-6 + Stage-5 + Stage-7 + Stage-8 depends on stages: Stage-7 Stage-15 depends on stages: Stage-2 , consists of Stage-12, Stage-11, Stage-13 Stage-12 Stage-11 @@ -1895,16 +1895,16 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi2 - Stage: Stage-8 + Stage: Stage-9 Conditional Operator - Stage: Stage-5 + Stage: Stage-6 Move Operator files: hdfs directory: true #### A masked pattern was here #### - Stage: Stage-9 + Stage: Stage-4 Dependency Collection Stage: Stage-0 @@ -1933,7 +1933,7 @@ Stage: Stage-10 Stats-Aggr Operator - Stage: Stage-4 + Stage: Stage-5 Map Reduce Alias -> Map Operator Tree: #### A masked pattern was here #### @@ -1947,7 +1947,7 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi1 - Stage: Stage-6 + Stage: Stage-7 Map Reduce Alias -> Map Operator Tree: #### A masked pattern was here #### @@ -1961,7 +1961,7 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi1 - Stage: Stage-7 + Stage: Stage-8 Move Operator files: hdfs directory: true @@ -2432,16 +2432,16 @@ STAGE DEPENDENCIES: Stage-2 is a root stage - Stage-8 depends on stages: Stage-2 , consists of Stage-5, Stage-4, Stage-6 - Stage-5 - Stage-9 depends on stages: Stage-5, Stage-4, Stage-7, Stage-12, Stage-11, Stage-14 - Stage-0 depends on stages: Stage-9 + Stage-9 depends on stages: Stage-2 , 
consists of Stage-6, Stage-5, Stage-7 + Stage-6 + Stage-4 depends on stages: Stage-6, Stage-5, Stage-8, Stage-12, Stage-11, Stage-14 + Stage-0 depends on stages: Stage-4 Stage-3 depends on stages: Stage-0 - Stage-1 depends on stages: Stage-9 + Stage-1 depends on stages: Stage-4 Stage-10 depends on stages: Stage-1 - Stage-4 - Stage-6 - Stage-7 depends on stages: Stage-6 + Stage-5 + Stage-7 + Stage-8 depends on stages: Stage-7 Stage-15 depends on stages: Stage-2 , consists of Stage-12, Stage-11, Stage-13 Stage-12 Stage-11 @@ -2551,16 +2551,16 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi2 - Stage: Stage-8 + Stage: Stage-9 Conditional Operator - Stage: Stage-5 + Stage: Stage-6 Move Operator files: hdfs directory: true #### A masked pattern was here #### - Stage: Stage-9 + Stage: Stage-4 Dependency Collection Stage: Stage-0 @@ -2589,7 +2589,7 @@ Stage: Stage-10 Stats-Aggr Operator - Stage: Stage-4 + Stage: Stage-5 Map Reduce Alias -> Map Operator Tree: #### A masked pattern was here #### @@ -2603,7 +2603,7 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi1 - Stage: Stage-6 + Stage: Stage-7 Map Reduce Alias -> Map Operator Tree: #### A masked pattern was here #### @@ -2617,7 +2617,7 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi1 - Stage: Stage-7 + Stage: Stage-8 Move Operator files: hdfs directory: true @@ -3178,16 +3178,16 @@ STAGE DEPENDENCIES: Stage-2 is a root stage - Stage-8 depends on stages: Stage-2 , consists of Stage-5, Stage-4, Stage-6 - Stage-5 - Stage-9 depends on stages: Stage-5, Stage-4, Stage-7, Stage-12, Stage-11, Stage-14 - Stage-0 depends on stages: Stage-9 + Stage-9 depends on stages: Stage-2 , consists of Stage-6, Stage-5, Stage-7 + Stage-6 + Stage-4 depends on stages: Stage-6, Stage-5, Stage-8, Stage-12, Stage-11, Stage-14 + Stage-0 depends on stages: Stage-4 Stage-3 depends on stages: Stage-0 - Stage-1 depends on stages: Stage-9 + Stage-1 depends on stages: Stage-4 Stage-10 depends on stages: Stage-1 - Stage-4 - Stage-6 - Stage-7 depends on stages: Stage-6 + Stage-5 + Stage-7 + Stage-8 depends on stages: Stage-7 Stage-15 depends on stages: Stage-2 , consists of Stage-12, Stage-11, Stage-13 Stage-12 Stage-11 @@ -3297,16 +3297,16 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi2 - Stage: Stage-8 + Stage: Stage-9 Conditional Operator - Stage: Stage-5 + Stage: Stage-6 Move Operator files: hdfs directory: true #### A masked pattern was here #### - Stage: Stage-9 + Stage: Stage-4 Dependency Collection Stage: Stage-0 @@ -3335,7 +3335,7 @@ Stage: Stage-10 Stats-Aggr Operator - Stage: Stage-4 + Stage: Stage-5 Map Reduce Alias -> Map Operator Tree: #### A masked pattern was here #### @@ -3349,7 +3349,7 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi1 - Stage: Stage-6 + Stage: Stage-7 Map Reduce Alias -> Map Operator Tree: #### A masked pattern was here #### @@ -3363,7 +3363,7 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi1 - Stage: Stage-7 + Stage: Stage-8 Move Operator files: hdfs directory: true @@ -6276,16 +6276,16 @@ STAGE DEPENDENCIES: Stage-4 is a root stage - Stage-10 depends on stages: Stage-4 , consists of Stage-7, Stage-6, Stage-8 - Stage-7 - Stage-11 depends on stages: Stage-7, Stage-6, Stage-9, Stage-14, Stage-13, Stage-16, Stage-18, Stage-19 - Stage-0 depends on stages: Stage-11 + Stage-11 depends on stages: Stage-4 , consists of Stage-8, Stage-7, 
Stage-9 + Stage-8 + Stage-6 depends on stages: Stage-8, Stage-7, Stage-10, Stage-14, Stage-13, Stage-16, Stage-18, Stage-19 + Stage-0 depends on stages: Stage-6 Stage-5 depends on stages: Stage-0 - Stage-1 depends on stages: Stage-11 + Stage-1 depends on stages: Stage-6 Stage-12 depends on stages: Stage-1 - Stage-6 - Stage-8 - Stage-9 depends on stages: Stage-8 + Stage-7 + Stage-9 + Stage-10 depends on stages: Stage-9 Stage-17 depends on stages: Stage-4 , consists of Stage-14, Stage-13, Stage-15 Stage-14 Stage-13 @@ -6422,16 +6422,16 @@ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Stage: Stage-10 + Stage: Stage-11 Conditional Operator - Stage: Stage-7 + Stage: Stage-8 Move Operator files: hdfs directory: true #### A masked pattern was here #### - Stage: Stage-11 + Stage: Stage-6 Dependency Collection Stage: Stage-0 @@ -6460,7 +6460,7 @@ Stage: Stage-12 Stats-Aggr Operator - Stage: Stage-6 + Stage: Stage-7 Map Reduce Alias -> Map Operator Tree: #### A masked pattern was here #### @@ -6474,7 +6474,7 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi1 - Stage: Stage-8 + Stage: Stage-9 Map Reduce Alias -> Map Operator Tree: #### A masked pattern was here #### @@ -6488,7 +6488,7 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi1 - Stage: Stage-9 + Stage: Stage-10 Move Operator files: hdfs directory: true @@ -6855,16 +6855,16 @@ STAGE DEPENDENCIES: Stage-4 is a root stage - Stage-10 depends on stages: Stage-4 , consists of Stage-7, Stage-6, Stage-8 - Stage-7 - Stage-11 depends on stages: Stage-7, Stage-6, Stage-9, Stage-14, Stage-13, Stage-16, Stage-18, Stage-19 - Stage-0 depends on stages: Stage-11 + Stage-11 depends on stages: Stage-4 , consists of Stage-8, Stage-7, Stage-9 + Stage-8 + Stage-6 depends on stages: Stage-8, Stage-7, Stage-10, Stage-14, Stage-13, Stage-16, Stage-18, Stage-19 + Stage-0 depends on stages: Stage-6 Stage-5 depends on stages: Stage-0 - Stage-1 depends on stages: Stage-11 + Stage-1 depends on stages: Stage-6 Stage-12 depends on stages: Stage-1 - Stage-6 - Stage-8 - Stage-9 depends on stages: Stage-8 + Stage-7 + Stage-9 + Stage-10 depends on stages: Stage-9 Stage-17 depends on stages: Stage-4 , consists of Stage-14, Stage-13, Stage-15 Stage-14 Stage-13 @@ -7001,16 +7001,16 @@ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Stage: Stage-10 + Stage: Stage-11 Conditional Operator - Stage: Stage-7 + Stage: Stage-8 Move Operator files: hdfs directory: true #### A masked pattern was here #### - Stage: Stage-11 + Stage: Stage-6 Dependency Collection Stage: Stage-0 @@ -7039,7 +7039,7 @@ Stage: Stage-12 Stats-Aggr Operator - Stage: Stage-6 + Stage: Stage-7 Map Reduce Alias -> Map Operator Tree: #### A masked pattern was here #### @@ -7053,7 +7053,7 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi1 - Stage: Stage-8 + Stage: Stage-9 Map Reduce Alias -> Map Operator Tree: #### A masked pattern was here #### @@ -7067,7 +7067,7 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.src_multi1 - Stage: Stage-9 + Stage: Stage-10 Move Operator files: hdfs directory: true Index: ql/src/test/results/clientpositive/tez/auto_join0.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/auto_join0.q.out (revision 0) +++ 
ql/src/test/results/clientpositive/tez/auto_join0.q.out (working copy) @@ -0,0 +1,188 @@ +PREHOOK: query: explain +select sum(hash(a.k1,a.v1,a.k2, a.v2)) +from ( +SELECT src1.key as k1, src1.value as v1, + src2.key as k2, src2.value as v2 FROM + (SELECT * FROM src WHERE src.key < 10) src1 + JOIN + (SELECT * FROM src WHERE src.key < 10) src2 + SORT BY k1, v1, k2, v2 +) a +PREHOOK: type: QUERY +POSTHOOK: query: explain +select sum(hash(a.k1,a.v1,a.k2, a.v2)) +from ( +SELECT src1.key as k1, src1.value as v1, + src2.key as k2, src2.value as v2 FROM + (SELECT * FROM src WHERE src.key < 10) src1 + JOIN + (SELECT * FROM src WHERE src.key < 10) src2 + SORT BY k1, v1, k2, v2 +) a +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 10)))) src1) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 10)))) src2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) k1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) v1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) k2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value) v2)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k1)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v1)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k2)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v2))))) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL a) k1) (. (TOK_TABLE_OR_COL a) v1) (. (TOK_TABLE_OR_COL a) k2) (. 
(TOK_TABLE_OR_COL a) v2))))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Filter Operator + predicate: + expr: (key < 10) + type: boolean + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Reduce Output Operator + sort order: + tag: 1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Alias -> Map Operator Tree: + src + TableScan + alias: src + Filter Operator + predicate: + expr: (key < 10) + type: boolean + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {_col0} {_col1} + 1 {_col0} {_col1} + handleSkewJoin: false + keys: + 0 [] + 1 [] + outputColumnNames: _col0, _col1, _col2, _col3 + Position of Big Table: 0 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: string + expr: _col2 + type: string + expr: _col3 + type: string + outputColumnNames: _col0, _col1, _col2, _col3 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + expr: _col1 + type: string + expr: _col2 + type: string + expr: _col3 + type: string + sort order: ++++ + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + expr: _col2 + type: string + expr: _col3 + type: string + Reduce Operator Tree: + Extract + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: string + expr: _col2 + type: string + expr: _col3 + type: string + outputColumnNames: _col0, _col1, _col2, _col3 + Group By Operator + aggregations: + expr: sum(hash(_col0,_col1,_col2,_col3)) + bucketGroup: false + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + tag: -1 + value expressions: + expr: _col0 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: sum(VALUE._col0) + bucketGroup: false + mode: mergepartial + outputColumnNames: _col0 + Select Operator + expressions: + expr: _col0 + type: bigint + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select sum(hash(a.k1,a.v1,a.k2, a.v2)) +from ( +SELECT src1.key as k1, src1.value as v1, + src2.key as k2, src2.value as v2 FROM + (SELECT * FROM src WHERE src.key < 10) src1 + JOIN + (SELECT * FROM src WHERE src.key < 10) src2 + SORT BY k1, v1, k2, v2 +) a +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select sum(hash(a.k1,a.v1,a.k2, a.v2)) +from ( +SELECT src1.key as k1, src1.value as v1, + src2.key as k2, src2.value as v2 FROM + (SELECT * FROM src WHERE src.key < 10) src1 + JOIN + (SELECT * FROM src WHERE src.key < 10) src2 + SORT BY k1, v1, k2, v2 +) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +34441656720 Index: ql/src/test/results/clientpositive/tez/auto_join1.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/auto_join1.q.out (revision 0) +++ 
ql/src/test/results/clientpositive/tez/auto_join1.q.out (working copy) @@ -0,0 +1,160 @@ +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@dest_j1 +PREHOOK: query: explain +FROM src src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +PREHOOK: type: QUERY +POSTHOOK: query: explain +FROM src src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_j1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src2 + TableScan + alias: src2 + Reduce Output Operator + key expressions: + expr: key + type: string + sort order: + + Map-reduce partition columns: + expr: key + type: string + tag: 1 + value expressions: + expr: value + type: string + Alias -> Map Operator Tree: + src1 + TableScan + alias: src1 + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} + 1 {value} + handleSkewJoin: false + keys: + 0 [Column[key]] + 1 [Column[key]] + outputColumnNames: _col0, _col5 + Position of Big Table: 0 + Select Operator + expressions: + expr: UDFToInteger(_col0) + type: int + expr: _col5 + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest_j1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest_j1 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest_j1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest_j1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest_j1 +POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dest_j1 +POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest_j1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest_j1 +#### A masked pattern was here #### +POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +101861029915 Index: ql/src/test/results/clientpositive/tez/bucket2.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/bucket2.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/bucket2.q.out (working copy) @@ -0,0 +1,499 @@ +PREHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@bucket2_1 +PREHOOK: query: explain extended +insert overwrite table bucket2_1 +select * from src +PREHOOK: type: QUERY +POSTHOOK: query: explain extended +insert overwrite table bucket2_1 +select * from src +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucket2_1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + GatherStats: false + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + Reduce Output Operator + sort order: + Map-reduce partition columns: + expr: UDFToInteger(_col0) + type: int + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.types string:string +#### A masked pattern was here #### + name default.src + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct src { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.types string:string +#### A masked pattern was here #### + name default.src + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct src { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.src + name: default.src + Truncated Path -> Alias: + /src [src] + Needs Tagging: false + Reduce Operator Tree: + Extract + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + Select Operator + expressions: + expr: UDFToInteger(_col0) + type: int + expr: _col1 + type: string + outputColumnNames: _col0, _col1 + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + File Output Operator + compressed: false + GlobalTableId: 1 +#### A masked pattern was here #### + NumFilesPerFileSink: 2 + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 2 + bucket_field_name key + columns key,value + columns.types int:string +#### A masked pattern was here #### + name default.bucket2_1 + serialization.ddl struct bucket2_1 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket2_1 + TotalFiles: 2 + GatherStats: true + MultiFileSpray: true + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 2 + bucket_field_name key + columns key,value + columns.types int:string +#### A masked pattern was here #### + name default.bucket2_1 + serialization.ddl struct bucket2_1 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket2_1 +#### A masked pattern was here #### + + Stage: Stage-3 + Stats-Aggr Operator +#### A masked pattern was here #### + +PREHOOK: query: insert overwrite table bucket2_1 +select * from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@bucket2_1 +POSTHOOK: query: insert overwrite table bucket2_1 +select 
* from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@bucket2_1 +POSTHOOK: Lineage: bucket2_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bucket2_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain +select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key +POSTHOOK: type: QUERY +POSTHOOK: Lineage: bucket2_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bucket2_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME bucket2_1) (TOK_TABLEBUCKETSAMPLE 1 2) s)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + s + TableScan + alias: s + Filter Operator + predicate: + expr: (((hash(key) & 2147483647) % 2) = 0) + type: boolean + Select Operator + expressions: + expr: key + type: int + expr: value + type: string + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: int + sort order: + + tag: -1 + value expressions: + expr: _col0 + type: int + expr: _col1 + type: string + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket2_1 +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket2_1 +#### A masked pattern was here #### +POSTHOOK: Lineage: bucket2_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bucket2_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 val_0 +0 val_0 +0 val_0 +2 val_2 +4 val_4 +8 val_8 +10 val_10 +12 val_12 +12 val_12 +18 val_18 +18 val_18 +20 val_20 +24 val_24 +24 val_24 +26 val_26 +26 val_26 +28 val_28 +30 val_30 +34 val_34 +42 val_42 +42 val_42 +44 val_44 +54 val_54 +58 val_58 +58 val_58 +64 val_64 +66 val_66 +70 val_70 +70 val_70 +70 val_70 +72 val_72 +72 val_72 +74 val_74 +76 val_76 +76 val_76 +78 val_78 +80 val_80 +82 val_82 +84 val_84 +84 val_84 +86 val_86 +90 val_90 +90 val_90 +90 val_90 +92 val_92 +96 val_96 +98 val_98 +98 val_98 +100 val_100 +100 val_100 +104 val_104 +104 val_104 +114 val_114 +116 val_116 +118 val_118 +118 val_118 +120 val_120 +120 val_120 +126 val_126 +128 val_128 +128 val_128 +128 val_128 +134 val_134 +134 val_134 +136 val_136 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +146 val_146 +146 val_146 +150 val_150 +152 val_152 +152 val_152 +156 val_156 +158 val_158 +160 val_160 +162 val_162 +164 val_164 +164 val_164 +166 val_166 +168 val_168 +170 val_170 +172 val_172 +172 val_172 +174 val_174 
+174 val_174 +176 val_176 +176 val_176 +178 val_178 +180 val_180 +186 val_186 +190 val_190 +192 val_192 +194 val_194 +196 val_196 +200 val_200 +200 val_200 +202 val_202 +208 val_208 +208 val_208 +208 val_208 +214 val_214 +216 val_216 +216 val_216 +218 val_218 +222 val_222 +224 val_224 +224 val_224 +226 val_226 +228 val_228 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +238 val_238 +238 val_238 +242 val_242 +242 val_242 +244 val_244 +248 val_248 +252 val_252 +256 val_256 +256 val_256 +258 val_258 +260 val_260 +262 val_262 +266 val_266 +272 val_272 +272 val_272 +274 val_274 +278 val_278 +278 val_278 +280 val_280 +280 val_280 +282 val_282 +282 val_282 +284 val_284 +286 val_286 +288 val_288 +288 val_288 +292 val_292 +296 val_296 +298 val_298 +298 val_298 +298 val_298 +302 val_302 +306 val_306 +308 val_308 +310 val_310 +316 val_316 +316 val_316 +316 val_316 +318 val_318 +318 val_318 +318 val_318 +322 val_322 +322 val_322 +332 val_332 +336 val_336 +338 val_338 +342 val_342 +342 val_342 +344 val_344 +344 val_344 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +356 val_356 +360 val_360 +362 val_362 +364 val_364 +366 val_366 +368 val_368 +374 val_374 +378 val_378 +382 val_382 +382 val_382 +384 val_384 +384 val_384 +384 val_384 +386 val_386 +392 val_392 +394 val_394 +396 val_396 +396 val_396 +396 val_396 +400 val_400 +402 val_402 +404 val_404 +404 val_404 +406 val_406 +406 val_406 +406 val_406 +406 val_406 +414 val_414 +414 val_414 +418 val_418 +424 val_424 +424 val_424 +430 val_430 +430 val_430 +430 val_430 +432 val_432 +436 val_436 +438 val_438 +438 val_438 +438 val_438 +444 val_444 +446 val_446 +448 val_448 +452 val_452 +454 val_454 +454 val_454 +454 val_454 +458 val_458 +458 val_458 +460 val_460 +462 val_462 +462 val_462 +466 val_466 +466 val_466 +466 val_466 +468 val_468 +468 val_468 +468 val_468 +468 val_468 +470 val_470 +472 val_472 +478 val_478 +478 val_478 +480 val_480 +480 val_480 +480 val_480 +482 val_482 +484 val_484 +490 val_490 +492 val_492 +492 val_492 +494 val_494 +496 val_496 +498 val_498 +498 val_498 +498 val_498 Index: ql/src/test/results/clientpositive/tez/bucket3.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/bucket3.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/bucket3.q.out (working copy) @@ -0,0 +1,528 @@ +PREHOOK: query: CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@bucket3_1 +PREHOOK: query: explain extended +insert overwrite table bucket3_1 partition (ds='1') +select * from src +PREHOOK: type: QUERY +POSTHOOK: query: explain extended +insert overwrite table bucket3_1 partition (ds='1') +select * from src +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucket3_1) (TOK_PARTSPEC (TOK_PARTVAL ds '1')))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + 
GatherStats: false + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + Reduce Output Operator + sort order: + Map-reduce partition columns: + expr: UDFToInteger(_col0) + type: int + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.types string:string +#### A masked pattern was here #### + name default.src + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct src { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.types string:string +#### A masked pattern was here #### + name default.src + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct src { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.src + name: default.src + Truncated Path -> Alias: + /src [src] + Needs Tagging: false + Reduce Operator Tree: + Extract + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + Select Operator + expressions: + expr: UDFToInteger(_col0) + type: int + expr: _col1 + type: string + outputColumnNames: _col0, _col1 + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + File Output Operator + compressed: false + GlobalTableId: 1 +#### A masked pattern was here #### + NumFilesPerFileSink: 2 + Static Partition Specification: ds=1/ + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 2 + bucket_field_name key + columns key,value + columns.types int:string +#### A masked pattern was here #### + name default.bucket3_1 + partition_columns ds + serialization.ddl struct bucket3_1 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket3_1 + TotalFiles: 2 + GatherStats: true + MultiFileSpray: true + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: true +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 2 + bucket_field_name key + columns key,value + columns.types int:string +#### A masked pattern was here #### + name default.bucket3_1 + partition_columns ds + serialization.ddl struct bucket3_1 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket3_1 +#### A masked pattern was here #### + + Stage: Stage-3 + Stats-Aggr Operator +#### A masked pattern was here #### + +PREHOOK: query: insert overwrite table bucket3_1 partition (ds='1') +select * from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@bucket3_1@ds=1 +POSTHOOK: query: insert overwrite table bucket3_1 partition (ds='1') +select * from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@bucket3_1@ds=1 +POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table bucket3_1 partition (ds='2') +select * from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@bucket3_1@ds=2 +POSTHOOK: query: insert overwrite table bucket3_1 partition (ds='2') +select * from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@bucket3_1@ds=2 +POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain +select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key +POSTHOOK: type: QUERY +POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME bucket3_1) (TOK_TABLEBUCKETSAMPLE 1 2) s)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (= (TOK_TABLE_OR_COL ds) '1')) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + s + TableScan + alias: s + Filter Operator + predicate: + expr: (((hash(key) & 2147483647) % 2) = 0) + type: boolean + Select Operator + expressions: + expr: key + type: int + expr: value + 
type: string + expr: ds + type: string + outputColumnNames: _col0, _col1, _col2 + Reduce Output Operator + key expressions: + expr: _col0 + type: int + sort order: + + tag: -1 + value expressions: + expr: _col0 + type: int + expr: _col1 + type: string + expr: _col2 + type: string + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket3_1 +PREHOOK: Input: default@bucket3_1@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket3_1 +POSTHOOK: Input: default@bucket3_1@ds=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 val_0 1 +0 val_0 1 +0 val_0 1 +2 val_2 1 +4 val_4 1 +8 val_8 1 +10 val_10 1 +12 val_12 1 +12 val_12 1 +18 val_18 1 +18 val_18 1 +20 val_20 1 +24 val_24 1 +24 val_24 1 +26 val_26 1 +26 val_26 1 +28 val_28 1 +30 val_30 1 +34 val_34 1 +42 val_42 1 +42 val_42 1 +44 val_44 1 +54 val_54 1 +58 val_58 1 +58 val_58 1 +64 val_64 1 +66 val_66 1 +70 val_70 1 +70 val_70 1 +70 val_70 1 +72 val_72 1 +72 val_72 1 +74 val_74 1 +76 val_76 1 +76 val_76 1 +78 val_78 1 +80 val_80 1 +82 val_82 1 +84 val_84 1 +84 val_84 1 +86 val_86 1 +90 val_90 1 +90 val_90 1 +90 val_90 1 +92 val_92 1 +96 val_96 1 +98 val_98 1 +98 val_98 1 +100 val_100 1 +100 val_100 1 +104 val_104 1 +104 val_104 1 +114 val_114 1 +116 val_116 1 +118 val_118 1 +118 val_118 1 +120 val_120 1 +120 val_120 1 +126 val_126 1 +128 val_128 1 +128 val_128 1 +128 val_128 1 +134 val_134 1 +134 val_134 1 +136 val_136 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +146 val_146 1 +146 val_146 1 +150 val_150 1 +152 val_152 1 +152 val_152 1 +156 val_156 1 +158 val_158 1 +160 val_160 1 +162 val_162 1 +164 val_164 1 +164 val_164 1 +166 val_166 1 +168 val_168 1 +170 val_170 1 +172 val_172 1 +172 val_172 1 +174 val_174 1 +174 val_174 1 +176 val_176 1 +176 val_176 1 +178 val_178 1 +180 val_180 1 +186 val_186 1 +190 val_190 1 +192 val_192 1 +194 val_194 1 +196 val_196 1 +200 val_200 1 +200 val_200 1 +202 val_202 1 +208 val_208 1 +208 val_208 1 +208 val_208 1 +214 val_214 1 +216 val_216 1 +216 val_216 1 +218 val_218 1 +222 val_222 1 +224 val_224 1 +224 val_224 1 +226 val_226 1 +228 val_228 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +238 val_238 1 +238 val_238 1 +242 val_242 1 +242 val_242 1 +244 val_244 1 +248 val_248 1 +252 val_252 1 +256 val_256 1 +256 val_256 1 +258 val_258 1 +260 val_260 1 +262 val_262 1 +266 val_266 1 +272 val_272 1 +272 val_272 1 +274 val_274 1 +278 val_278 1 +278 val_278 1 +280 val_280 1 +280 val_280 1 +282 
val_282 1 +282 val_282 1 +284 val_284 1 +286 val_286 1 +288 val_288 1 +288 val_288 1 +292 val_292 1 +296 val_296 1 +298 val_298 1 +298 val_298 1 +298 val_298 1 +302 val_302 1 +306 val_306 1 +308 val_308 1 +310 val_310 1 +316 val_316 1 +316 val_316 1 +316 val_316 1 +318 val_318 1 +318 val_318 1 +318 val_318 1 +322 val_322 1 +322 val_322 1 +332 val_332 1 +336 val_336 1 +338 val_338 1 +342 val_342 1 +342 val_342 1 +344 val_344 1 +344 val_344 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +356 val_356 1 +360 val_360 1 +362 val_362 1 +364 val_364 1 +366 val_366 1 +368 val_368 1 +374 val_374 1 +378 val_378 1 +382 val_382 1 +382 val_382 1 +384 val_384 1 +384 val_384 1 +384 val_384 1 +386 val_386 1 +392 val_392 1 +394 val_394 1 +396 val_396 1 +396 val_396 1 +396 val_396 1 +400 val_400 1 +402 val_402 1 +404 val_404 1 +404 val_404 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +414 val_414 1 +414 val_414 1 +418 val_418 1 +424 val_424 1 +424 val_424 1 +430 val_430 1 +430 val_430 1 +430 val_430 1 +432 val_432 1 +436 val_436 1 +438 val_438 1 +438 val_438 1 +438 val_438 1 +444 val_444 1 +446 val_446 1 +448 val_448 1 +452 val_452 1 +454 val_454 1 +454 val_454 1 +454 val_454 1 +458 val_458 1 +458 val_458 1 +460 val_460 1 +462 val_462 1 +462 val_462 1 +466 val_466 1 +466 val_466 1 +466 val_466 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +470 val_470 1 +472 val_472 1 +478 val_478 1 +478 val_478 1 +480 val_480 1 +480 val_480 1 +480 val_480 1 +482 val_482 1 +484 val_484 1 +490 val_490 1 +492 val_492 1 +492 val_492 1 +494 val_494 1 +496 val_496 1 +498 val_498 1 +498 val_498 1 +498 val_498 1 Index: ql/src/test/results/clientpositive/tez/bucket4.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/bucket4.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/bucket4.q.out (working copy) @@ -0,0 +1,491 @@ +PREHOOK: query: CREATE TABLE bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@bucket4_1 +PREHOOK: query: explain extended +insert overwrite table bucket4_1 +select * from src +PREHOOK: type: QUERY +POSTHOOK: query: explain extended +insert overwrite table bucket4_1 +select * from src +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucket4_1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + GatherStats: false + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + Reduce Output Operator + key expressions: + expr: UDFToInteger(_col0) + type: int + sort order: + + Map-reduce partition columns: + expr: UDFToInteger(_col0) + type: int + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + tag: -1 + value expressions: + 
expr: _col0 + type: string + expr: _col1 + type: string + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.types string:string +#### A masked pattern was here #### + name default.src + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct src { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.types string:string +#### A masked pattern was here #### + name default.src + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct src { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.src + name: default.src + Truncated Path -> Alias: + /src [src] + Needs Tagging: false + Reduce Operator Tree: + Extract + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + Select Operator + expressions: + expr: UDFToInteger(_col0) + type: int + expr: _col1 + type: string + outputColumnNames: _col0, _col1 + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + File Output Operator + compressed: false + GlobalTableId: 1 +#### A masked pattern was here #### + NumFilesPerFileSink: 2 + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.types int:string +#### A masked pattern was here #### + name default.bucket4_1 + serialization.ddl struct bucket4_1 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket4_1 + TotalFiles: 2 + GatherStats: true + MultiFileSpray: true + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.types int:string +#### A masked pattern was here #### + name default.bucket4_1 + serialization.ddl struct bucket4_1 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket4_1 +#### A masked 
pattern was here #### + + Stage: Stage-3 + Stats-Aggr Operator +#### A masked pattern was here #### + +PREHOOK: query: insert overwrite table bucket4_1 +select * from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@bucket4_1 +POSTHOOK: query: insert overwrite table bucket4_1 +select * from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@bucket4_1 +POSTHOOK: Lineage: bucket4_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bucket4_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain +select * from bucket4_1 tablesample (bucket 1 out of 2) s +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * from bucket4_1 tablesample (bucket 1 out of 2) s +POSTHOOK: type: QUERY +POSTHOOK: Lineage: bucket4_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bucket4_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME bucket4_1) (TOK_TABLEBUCKETSAMPLE 1 2) s)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + s + TableScan + alias: s + Filter Operator + predicate: + expr: (((hash(key) & 2147483647) % 2) = 0) + type: boolean + Select Operator + expressions: + expr: key + type: int + expr: value + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select * from bucket4_1 tablesample (bucket 1 out of 2) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket4_1 +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket4_1 tablesample (bucket 1 out of 2) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket4_1 +#### A masked pattern was here #### +POSTHOOK: Lineage: bucket4_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bucket4_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 val_0 +0 val_0 +0 val_0 +2 val_2 +4 val_4 +8 val_8 +10 val_10 +12 val_12 +12 val_12 +18 val_18 +18 val_18 +20 val_20 +24 val_24 +24 val_24 +26 val_26 +26 val_26 +28 val_28 +30 val_30 +34 val_34 +42 val_42 +42 val_42 +44 val_44 +54 val_54 +58 val_58 +58 val_58 +64 val_64 +66 val_66 +70 val_70 +70 val_70 +70 val_70 +72 val_72 +72 val_72 +74 val_74 +76 val_76 +76 val_76 +78 val_78 +80 val_80 +82 val_82 +84 val_84 +84 val_84 +86 val_86 +90 val_90 +90 val_90 +90 val_90 +92 val_92 +96 val_96 +98 val_98 +98 val_98 +100 val_100 +100 val_100 +104 val_104 +104 val_104 +114 val_114 +116 val_116 +118 val_118 +118 val_118 +120 val_120 +120 val_120 +126 val_126 +128 val_128 +128 val_128 +128 val_128 +134 val_134 +134 val_134 +136 val_136 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +146 val_146 +146 val_146 +150 val_150 +152 val_152 +152 val_152 +156 val_156 +158 val_158 +160 val_160 +162 val_162 +164 val_164 +164 val_164 +166 val_166 +168 val_168 +170 val_170 +172 val_172 +172 val_172 +174 val_174 +174 
val_174 +176 val_176 +176 val_176 +178 val_178 +180 val_180 +186 val_186 +190 val_190 +192 val_192 +194 val_194 +196 val_196 +200 val_200 +200 val_200 +202 val_202 +208 val_208 +208 val_208 +208 val_208 +214 val_214 +216 val_216 +216 val_216 +218 val_218 +222 val_222 +224 val_224 +224 val_224 +226 val_226 +228 val_228 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +238 val_238 +238 val_238 +242 val_242 +242 val_242 +244 val_244 +248 val_248 +252 val_252 +256 val_256 +256 val_256 +258 val_258 +260 val_260 +262 val_262 +266 val_266 +272 val_272 +272 val_272 +274 val_274 +278 val_278 +278 val_278 +280 val_280 +280 val_280 +282 val_282 +282 val_282 +284 val_284 +286 val_286 +288 val_288 +288 val_288 +292 val_292 +296 val_296 +298 val_298 +298 val_298 +298 val_298 +302 val_302 +306 val_306 +308 val_308 +310 val_310 +316 val_316 +316 val_316 +316 val_316 +318 val_318 +318 val_318 +318 val_318 +322 val_322 +322 val_322 +332 val_332 +336 val_336 +338 val_338 +342 val_342 +342 val_342 +344 val_344 +344 val_344 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +356 val_356 +360 val_360 +362 val_362 +364 val_364 +366 val_366 +368 val_368 +374 val_374 +378 val_378 +382 val_382 +382 val_382 +384 val_384 +384 val_384 +384 val_384 +386 val_386 +392 val_392 +394 val_394 +396 val_396 +396 val_396 +396 val_396 +400 val_400 +402 val_402 +404 val_404 +404 val_404 +406 val_406 +406 val_406 +406 val_406 +406 val_406 +414 val_414 +414 val_414 +418 val_418 +424 val_424 +424 val_424 +430 val_430 +430 val_430 +430 val_430 +432 val_432 +436 val_436 +438 val_438 +438 val_438 +438 val_438 +444 val_444 +446 val_446 +448 val_448 +452 val_452 +454 val_454 +454 val_454 +454 val_454 +458 val_458 +458 val_458 +460 val_460 +462 val_462 +462 val_462 +466 val_466 +466 val_466 +466 val_466 +468 val_468 +468 val_468 +468 val_468 +468 val_468 +470 val_470 +472 val_472 +478 val_478 +478 val_478 +480 val_480 +480 val_480 +480 val_480 +482 val_482 +484 val_484 +490 val_490 +492 val_492 +492 val_492 +494 val_494 +496 val_496 +498 val_498 +498 val_498 +498 val_498 Index: ql/src/test/results/clientpositive/tez/count.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/count.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/count.q.out (working copy) @@ -0,0 +1,578 @@ +PREHOOK: query: create table abcd (a int, b int, c int, d int) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table abcd (a int, b int, c int, d int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@abcd +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd +PREHOOK: type: LOAD +PREHOOK: Output: default@abcd +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd +POSTHOOK: type: LOAD +POSTHOOK: Output: default@abcd +PREHOOK: query: select * from abcd +PREHOOK: type: QUERY +PREHOOK: Input: default@abcd +#### A masked pattern was here #### +POSTHOOK: query: select * from abcd +POSTHOOK: type: QUERY +POSTHOOK: Input: default@abcd +#### A masked pattern was here #### +NULL 35 23 6 +10 1000 50 1 +100 100 10 3 +12 NULL 80 2 +10 100 NULL 5 +10 100 45 4 +12 100 75 7 +PREHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a +PREHOOK: type: QUERY +POSTHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME abcd))) (TOK_INSERT 
(TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL a)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_TABLE_OR_COL d)))) (TOK_GROUPBY (TOK_TABLE_OR_COL a)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + abcd + TableScan + alias: abcd + Select Operator + expressions: + expr: a + type: int + expr: b + type: int + expr: c + type: int + expr: d + type: int + outputColumnNames: a, b, c, d + Group By Operator + aggregations: + expr: count(DISTINCT b) + expr: count(DISTINCT c) + expr: sum(d) + bucketGroup: false + keys: + expr: a + type: int + expr: b + type: int + expr: c + type: int + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Reduce Output Operator + key expressions: + expr: _col0 + type: int + expr: _col1 + type: int + expr: _col2 + type: int + sort order: +++ + Map-reduce partition columns: + expr: _col0 + type: int + tag: -1 + value expressions: + expr: _col3 + type: bigint + expr: _col4 + type: bigint + expr: _col5 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(DISTINCT KEY._col1:0._col0) + expr: count(DISTINCT KEY._col1:1._col0) + expr: sum(VALUE._col2) + bucketGroup: false + keys: + expr: KEY._col0 + type: int + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Select Operator + expressions: + expr: _col0 + type: int + expr: _col1 + type: bigint + expr: _col2 + type: bigint + expr: _col3 + type: bigint + outputColumnNames: _col0, _col1, _col2, _col3 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select a, count(distinct b), count(distinct c), sum(d) from abcd group by a +PREHOOK: type: QUERY +PREHOOK: Input: default@abcd +#### A masked pattern was here #### +POSTHOOK: query: select a, count(distinct b), count(distinct c), sum(d) from abcd group by a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@abcd +#### A masked pattern was here #### +NULL 1 1 6 +10 2 2 10 +12 1 2 9 +100 1 1 3 +PREHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME abcd))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count 1)) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)) (TOK_SELEXPR 
(TOK_FUNCTION count (TOK_TABLE_OR_COL a))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d)))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + abcd + TableScan + alias: abcd + Select Operator + expressions: + expr: a + type: int + expr: b + type: int + expr: c + type: int + expr: d + type: int + outputColumnNames: a, b, c, d + Group By Operator + aggregations: + expr: count(1) + expr: count() + expr: count(a) + expr: count(b) + expr: count(c) + expr: count(d) + expr: count(DISTINCT a) + expr: count(DISTINCT b) + expr: count(DISTINCT c) + expr: count(DISTINCT d) + expr: count(DISTINCT a, b) + expr: count(DISTINCT b, c) + expr: count(DISTINCT c, d) + expr: count(DISTINCT a, d) + expr: count(DISTINCT a, c) + expr: count(DISTINCT b, d) + expr: count(DISTINCT a, b, c) + expr: count(DISTINCT b, c, d) + expr: count(DISTINCT a, c, d) + expr: count(DISTINCT a, b, d) + expr: count(DISTINCT a, b, c, d) + bucketGroup: false + keys: + expr: a + type: int + expr: b + type: int + expr: c + type: int + expr: d + type: int + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24 + Reduce Output Operator + key expressions: + expr: _col0 + type: int + expr: _col1 + type: int + expr: _col2 + type: int + expr: _col3 + type: int + sort order: ++++ + tag: -1 + value expressions: + expr: _col4 + type: bigint + expr: _col5 + type: bigint + expr: _col6 + type: bigint + expr: _col7 + type: bigint + expr: _col8 + type: bigint + expr: _col9 + type: bigint + expr: _col10 + type: bigint + expr: _col11 + type: bigint + expr: _col12 + type: bigint + expr: _col13 + type: bigint + expr: _col14 + type: bigint + expr: _col15 + type: bigint + expr: _col16 + type: bigint + expr: _col17 + type: bigint + expr: _col18 + type: bigint + expr: _col19 + type: bigint + expr: _col20 + type: bigint + expr: _col21 + type: bigint + expr: _col22 + type: bigint + expr: _col23 + type: bigint + expr: _col24 + type: bigint + Reduce Operator Tree: + Group By Operator 
+ aggregations: + expr: count(VALUE._col0) + expr: count(VALUE._col1) + expr: count(VALUE._col2) + expr: count(VALUE._col3) + expr: count(VALUE._col4) + expr: count(VALUE._col5) + expr: count(DISTINCT KEY._col0:0._col0) + expr: count(DISTINCT KEY._col0:1._col0) + expr: count(DISTINCT KEY._col0:2._col0) + expr: count(DISTINCT KEY._col0:3._col0) + expr: count(DISTINCT KEY._col0:4._col0, KEY._col0:4._col1) + expr: count(DISTINCT KEY._col0:5._col0, KEY._col0:5._col1) + expr: count(DISTINCT KEY._col0:6._col0, KEY._col0:6._col1) + expr: count(DISTINCT KEY._col0:7._col0, KEY._col0:7._col1) + expr: count(DISTINCT KEY._col0:8._col0, KEY._col0:8._col1) + expr: count(DISTINCT KEY._col0:9._col0, KEY._col0:9._col1) + expr: count(DISTINCT KEY._col0:10._col0, KEY._col0:10._col1, KEY._col0:10._col2) + expr: count(DISTINCT KEY._col0:11._col0, KEY._col0:11._col1, KEY._col0:11._col2) + expr: count(DISTINCT KEY._col0:12._col0, KEY._col0:12._col1, KEY._col0:12._col2) + expr: count(DISTINCT KEY._col0:13._col0, KEY._col0:13._col1, KEY._col0:13._col2) + expr: count(DISTINCT KEY._col0:14._col0, KEY._col0:14._col1, KEY._col0:14._col2, KEY._col0:14._col3) + bucketGroup: false + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 + Select Operator + expressions: + expr: _col0 + type: bigint + expr: _col1 + type: bigint + expr: _col2 + type: bigint + expr: _col3 + type: bigint + expr: _col4 + type: bigint + expr: _col5 + type: bigint + expr: _col6 + type: bigint + expr: _col7 + type: bigint + expr: _col8 + type: bigint + expr: _col9 + type: bigint + expr: _col10 + type: bigint + expr: _col11 + type: bigint + expr: _col12 + type: bigint + expr: _col13 + type: bigint + expr: _col14 + type: bigint + expr: _col15 + type: bigint + expr: _col16 + type: bigint + expr: _col17 + type: bigint + expr: _col18 + type: bigint + expr: _col19 + type: bigint + expr: _col20 + type: bigint + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd +PREHOOK: type: QUERY +PREHOOK: Input: default@abcd +#### A masked pattern was here #### +POSTHOOK: query: select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd +POSTHOOK: type: QUERY +POSTHOOK: Input: default@abcd +#### A masked pattern was here #### +7 7 6 6 6 7 3 3 6 7 4 5 6 6 5 6 4 5 
5 5 4 +PREHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a +PREHOOK: type: QUERY +POSTHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME abcd))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL a)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_TABLE_OR_COL d)))) (TOK_GROUPBY (TOK_TABLE_OR_COL a)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + abcd + TableScan + alias: abcd + Select Operator + expressions: + expr: a + type: int + expr: b + type: int + expr: c + type: int + expr: d + type: int + outputColumnNames: a, b, c, d + Reduce Output Operator + key expressions: + expr: a + type: int + expr: b + type: int + expr: c + type: int + sort order: +++ + Map-reduce partition columns: + expr: a + type: int + tag: -1 + value expressions: + expr: d + type: int + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(DISTINCT KEY._col1:0._col0) + expr: count(DISTINCT KEY._col1:1._col0) + expr: sum(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: int + mode: complete + outputColumnNames: _col0, _col1, _col2, _col3 + Select Operator + expressions: + expr: _col0 + type: int + expr: _col1 + type: bigint + expr: _col2 + type: bigint + expr: _col3 + type: bigint + outputColumnNames: _col0, _col1, _col2, _col3 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select a, count(distinct b), count(distinct c), sum(d) from abcd group by a +PREHOOK: type: QUERY +PREHOOK: Input: default@abcd +#### A masked pattern was here #### +POSTHOOK: query: select a, count(distinct b), count(distinct c), sum(d) from abcd group by a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@abcd +#### A masked pattern was here #### +NULL 1 1 6 +10 2 2 10 +12 1 2 9 +100 1 1 3 +PREHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME abcd))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count 1)) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)) (TOK_SELEXPR (TOK_FUNCTION 
count (TOK_TABLE_OR_COL a))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d)))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + abcd + TableScan + alias: abcd + Select Operator + expressions: + expr: a + type: int + expr: b + type: int + expr: c + type: int + expr: d + type: int + outputColumnNames: a, b, c, d + Reduce Output Operator + key expressions: + expr: a + type: int + expr: b + type: int + expr: c + type: int + expr: d + type: int + sort order: ++++ + tag: -1 + value expressions: + expr: 1 + type: int + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(1) + expr: count() + expr: count(KEY._col0:0._col0) + expr: count(KEY._col0:1._col0) + expr: count(KEY._col0:2._col0) + expr: count(KEY._col0:3._col0) + expr: count(DISTINCT KEY._col0:0._col0) + expr: count(DISTINCT KEY._col0:1._col0) + expr: count(DISTINCT KEY._col0:2._col0) + expr: count(DISTINCT KEY._col0:3._col0) + expr: count(DISTINCT KEY._col0:4._col0, KEY._col0:4._col1) + expr: count(DISTINCT KEY._col0:5._col0, KEY._col0:5._col1) + expr: count(DISTINCT KEY._col0:6._col0, KEY._col0:6._col1) + expr: count(DISTINCT KEY._col0:7._col0, KEY._col0:7._col1) + expr: count(DISTINCT KEY._col0:8._col0, KEY._col0:8._col1) + expr: count(DISTINCT KEY._col0:9._col0, KEY._col0:9._col1) + expr: count(DISTINCT KEY._col0:10._col0, KEY._col0:10._col1, KEY._col0:10._col2) + expr: count(DISTINCT KEY._col0:11._col0, KEY._col0:11._col1, KEY._col0:11._col2) + expr: count(DISTINCT KEY._col0:12._col0, KEY._col0:12._col1, KEY._col0:12._col2) + expr: count(DISTINCT KEY._col0:13._col0, KEY._col0:13._col1, KEY._col0:13._col2) + expr: count(DISTINCT KEY._col0:14._col0, KEY._col0:14._col1, KEY._col0:14._col2, KEY._col0:14._col3) + bucketGroup: false + mode: complete + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 + Select Operator + expressions: + expr: _col0 + type: bigint + expr: _col1 + type: bigint + expr: _col2 + type: bigint + expr: _col3 + type: bigint + expr: _col4 + type: bigint + 
expr: _col5 + type: bigint + expr: _col6 + type: bigint + expr: _col7 + type: bigint + expr: _col8 + type: bigint + expr: _col9 + type: bigint + expr: _col10 + type: bigint + expr: _col11 + type: bigint + expr: _col12 + type: bigint + expr: _col13 + type: bigint + expr: _col14 + type: bigint + expr: _col15 + type: bigint + expr: _col16 + type: bigint + expr: _col17 + type: bigint + expr: _col18 + type: bigint + expr: _col19 + type: bigint + expr: _col20 + type: bigint + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd +PREHOOK: type: QUERY +PREHOOK: Input: default@abcd +#### A masked pattern was here #### +POSTHOOK: query: select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd +POSTHOOK: type: QUERY +POSTHOOK: Input: default@abcd +#### A masked pattern was here #### +7 7 6 6 6 7 3 3 6 7 4 5 6 6 5 6 4 5 5 5 4 Index: ql/src/test/results/clientpositive/tez/create_merge_compressed.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/create_merge_compressed.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/create_merge_compressed.q.out (working copy) @@ -0,0 +1,168 @@ +PREHOOK: query: create table src_rc_merge_test(key int, value string) stored as rcfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table src_rc_merge_test(key int, value string) stored as rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@src_rc_merge_test +PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test +PREHOOK: type: LOAD +PREHOOK: Output: default@src_rc_merge_test +POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test +POSTHOOK: type: LOAD +POSTHOOK: Output: default@src_rc_merge_test +PREHOOK: query: create table tgt_rc_merge_test(key int, value string) stored as rcfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table tgt_rc_merge_test(key int, value string) stored as rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@tgt_rc_merge_test +PREHOOK: query: insert into table tgt_rc_merge_test select * from src_rc_merge_test +PREHOOK: type: QUERY +PREHOOK: Input: default@src_rc_merge_test +PREHOOK: Output: default@tgt_rc_merge_test +POSTHOOK: query: insert into table tgt_rc_merge_test select * from src_rc_merge_test +POSTHOOK: type: QUERY 
+POSTHOOK: Input: default@src_rc_merge_test +POSTHOOK: Output: default@tgt_rc_merge_test +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: insert into table tgt_rc_merge_test select * from src_rc_merge_test +PREHOOK: type: QUERY +PREHOOK: Input: default@src_rc_merge_test +PREHOOK: Output: default@tgt_rc_merge_test +POSTHOOK: query: insert into table tgt_rc_merge_test select * from src_rc_merge_test +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_rc_merge_test +POSTHOOK: Output: default@tgt_rc_merge_test +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: show table extended like `tgt_rc_merge_test` +PREHOOK: type: SHOW_TABLESTATUS +POSTHOOK: query: show table extended like `tgt_rc_merge_test` +POSTHOOK: type: SHOW_TABLESTATUS +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +tableName:tgt_rc_merge_test +#### A masked pattern was here #### +inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat +outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat +columns:struct columns { i32 key, string value} +partitioned:false +partitionColumns: +totalNumberFiles:2 +totalFileSize:342 +maxFileSize:171 +minFileSize:171 +#### A masked pattern was here #### + +PREHOOK: query: select count(1) from tgt_rc_merge_test +PREHOOK: type: QUERY +PREHOOK: Input: default@tgt_rc_merge_test +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from tgt_rc_merge_test +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tgt_rc_merge_test +#### A masked pattern was here #### +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +10 +PREHOOK: query: select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test +PREHOOK: type: QUERY +PREHOOK: Input: default@tgt_rc_merge_test +#### A masked pattern was here #### +POSTHOOK: query: select 
sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tgt_rc_merge_test +#### A masked pattern was here #### +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +46 -751895388 +PREHOOK: query: alter table tgt_rc_merge_test concatenate +PREHOOK: type: ALTER_TABLE_MERGE +PREHOOK: Input: default@tgt_rc_merge_test +PREHOOK: Output: default@tgt_rc_merge_test +POSTHOOK: query: alter table tgt_rc_merge_test concatenate +POSTHOOK: type: ALTER_TABLE_MERGE +POSTHOOK: Input: default@tgt_rc_merge_test +POSTHOOK: Output: default@tgt_rc_merge_test +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: show table extended like `tgt_rc_merge_test` +PREHOOK: type: SHOW_TABLESTATUS +POSTHOOK: query: show table extended like `tgt_rc_merge_test` +POSTHOOK: type: SHOW_TABLESTATUS +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +tableName:tgt_rc_merge_test +#### A masked pattern was here #### +inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat +outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat +columns:struct columns { i32 key, string value} +partitioned:false +partitionColumns: +totalNumberFiles:1 +totalFileSize:243 +maxFileSize:243 +minFileSize:243 +#### A masked pattern was here #### + +PREHOOK: query: select count(1) from tgt_rc_merge_test +PREHOOK: type: QUERY +PREHOOK: Input: default@tgt_rc_merge_test +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from tgt_rc_merge_test +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tgt_rc_merge_test +#### A masked pattern was here #### +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: 
tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +10 +PREHOOK: query: select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test +PREHOOK: type: QUERY +PREHOOK: Input: default@tgt_rc_merge_test +#### A masked pattern was here #### +POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tgt_rc_merge_test +#### A masked pattern was here #### +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +46 -751895388 +PREHOOK: query: drop table src_rc_merge_test +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@src_rc_merge_test +PREHOOK: Output: default@src_rc_merge_test +POSTHOOK: query: drop table src_rc_merge_test +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@src_rc_merge_test +POSTHOOK: Output: default@src_rc_merge_test +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: drop table tgt_rc_merge_test +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@tgt_rc_merge_test +PREHOOK: Output: default@tgt_rc_merge_test +POSTHOOK: query: drop table tgt_rc_merge_test +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@tgt_rc_merge_test +POSTHOOK: Output: default@tgt_rc_merge_test +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ] Index: ql/src/test/results/clientpositive/tez/cross_join.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/cross_join.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/cross_join.q.out (working copy) @@ -0,0 +1,189 @@ +PREHOOK: query: -- current +explain select src.key from src join src src2 +PREHOOK: type: QUERY +POSTHOOK: query: -- current +explain select src.key from src join src src2 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src)) (TOK_TABREF (TOK_TABNAME src) src2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. 
(TOK_TABLE_OR_COL src) key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src2 + TableScan + alias: src2 + Reduce Output Operator + sort order: + tag: 1 + Alias -> Map Operator Tree: + src + TableScan + alias: src + Reduce Output Operator + sort order: + tag: 0 + value expressions: + expr: key + type: string + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {VALUE._col0} + 1 + handleSkewJoin: false + outputColumnNames: _col0 + Select Operator + expressions: + expr: _col0 + type: string + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: -- ansi cross join +explain select src.key from src cross join src src2 +PREHOOK: type: QUERY +POSTHOOK: query: -- ansi cross join +explain select src.key from src cross join src src2 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_CROSSJOIN (TOK_TABREF (TOK_TABNAME src)) (TOK_TABREF (TOK_TABNAME src) src2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src2 + TableScan + alias: src2 + Reduce Output Operator + sort order: + tag: 1 + Alias -> Map Operator Tree: + src + TableScan + alias: src + Reduce Output Operator + sort order: + tag: 0 + value expressions: + expr: key + type: string + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {VALUE._col0} + 1 + handleSkewJoin: false + outputColumnNames: _col0 + Select Operator + expressions: + expr: _col0 + type: string + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: -- appending condition is allowed +explain select src.key from src cross join src src2 on src.key=src2.key +PREHOOK: type: QUERY +POSTHOOK: query: -- appending condition is allowed +explain select src.key from src cross join src src2 on src.key=src2.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_CROSSJOIN (TOK_TABREF (TOK_TABNAME src)) (TOK_TABREF (TOK_TABNAME src) src2) (= (. (TOK_TABLE_OR_COL src) key) (. (TOK_TABLE_OR_COL src2) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. 
(TOK_TABLE_OR_COL src) key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src2 + TableScan + alias: src2 + Reduce Output Operator + key expressions: + expr: key + type: string + sort order: + + Map-reduce partition columns: + expr: key + type: string + tag: 1 + Alias -> Map Operator Tree: + src + TableScan + alias: src + Reduce Output Operator + key expressions: + expr: key + type: string + sort order: + + Map-reduce partition columns: + expr: key + type: string + tag: 0 + value expressions: + expr: key + type: string + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {VALUE._col0} + 1 + handleSkewJoin: false + outputColumnNames: _col0 + Select Operator + expressions: + expr: _col0 + type: string + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Index: ql/src/test/results/clientpositive/tez/ctas.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/ctas.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/ctas.q.out (working copy) @@ -0,0 +1,1215 @@ +PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) + +create table nzhang_Tmp(a int, b string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) + +create table nzhang_Tmp(a int, b string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@nzhang_Tmp +PREHOOK: query: select * from nzhang_Tmp +PREHOOK: type: QUERY +PREHOOK: Input: default@nzhang_tmp +#### A masked pattern was here #### +POSTHOOK: query: select * from nzhang_Tmp +POSTHOOK: type: QUERY +POSTHOOK: Input: default@nzhang_tmp +#### A masked pattern was here #### +PREHOOK: query: explain create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: query: explain create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +ABSTRACT SYNTAX TREE: + (TOK_CREATETABLE (TOK_TABNAME nzhang_CTAS1) TOK_LIKETABLE (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key) k) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-9 depends on stages: Stage-2, Stage-0 + Stage-3 depends on stages: Stage-9 + Stage-0 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + expr: _col1 + type: string + sort order: ++ + tag: -1 + value expressions: + expr: _col0 + type: 
string + expr: _col1 + type: string + Reduce Operator Tree: + Extract + Limit + Reduce Output Operator + key expressions: + expr: _col0 + type: string + expr: _col1 + type: string + sort order: ++ + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Reduce Operator Tree: + Extract + Limit + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_CTAS1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-9 + Create Table Operator: + Create Table + columns: k string, value string + if not exists: false + input format: org.apache.hadoop.mapred.TextInputFormat + # buckets: -1 + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + name: nzhang_CTAS1 + isExternal: false + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-0 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_CTAS1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_CTAS1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@nzhang_CTAS1 +PREHOOK: query: select * from nzhang_CTAS1 +PREHOOK: type: QUERY +PREHOOK: Input: default@nzhang_ctas1 +#### A masked pattern was here #### +POSTHOOK: query: select * from nzhang_CTAS1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@nzhang_ctas1 +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +PREHOOK: query: describe formatted nzhang_CTAS1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: describe formatted nzhang_CTAS1 +POSTHOOK: type: DESCTABLE +# col_name data_type comment + +k string None +value string None + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Protect Mode: None +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 0 + rawDataSize 0 + totalSize 106 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain create table nzhang_ctas2 as select * from src sort by key, value limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: query: explain create table nzhang_ctas2 as select * from src sort by key, value limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +ABSTRACT SYNTAX TREE: + (TOK_CREATETABLE (TOK_TABNAME nzhang_ctas2) TOK_LIKETABLE (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-9 depends on stages: Stage-2, Stage-0 + Stage-3 depends on stages: Stage-9 + Stage-0 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + expr: _col1 + type: string + sort order: ++ + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Reduce Operator Tree: + Extract + Limit + Reduce Output Operator + key expressions: + expr: _col0 + type: string + expr: _col1 + type: string + sort order: ++ + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Reduce Operator Tree: + Extract + Limit + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas2 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-9 + Create Table Operator: + Create Table + columns: key string, value string + if not exists: false + input format: org.apache.hadoop.mapred.TextInputFormat + # buckets: -1 + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + name: nzhang_ctas2 + isExternal: false + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-0 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas2 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input 
format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas2 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: create table nzhang_ctas2 as select * from src sort by key, value limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: create table nzhang_ctas2 as select * from src sort by key, value limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@nzhang_ctas2 +PREHOOK: query: select * from nzhang_ctas2 +PREHOOK: type: QUERY +PREHOOK: Input: default@nzhang_ctas2 +#### A masked pattern was here #### +POSTHOOK: query: select * from nzhang_ctas2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@nzhang_ctas2 +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +PREHOOK: query: describe formatted nzhang_CTAS2 +PREHOOK: type: DESCTABLE +POSTHOOK: query: describe formatted nzhang_CTAS2 +POSTHOOK: type: DESCTABLE +# col_name data_type comment + +key string None +value string None + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Protect Mode: None +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 0 + rawDataSize 0 + totalSize 106 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: query: explain create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +ABSTRACT SYNTAX TREE: + (TOK_CREATETABLE (TOK_TABNAME nzhang_ctas3) TOK_LIKETABLE (TOK_TABLESERIALIZER (TOK_SERDENAME "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe")) TOK_TBLRCFILE (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (/ (TOK_TABLE_OR_COL key) 2) half_key) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_TABLE_OR_COL value) "_con") conb)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL half_key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL conb))) (TOK_LIMIT 10)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-9 depends on stages: Stage-2, Stage-0 + Stage-3 depends on stages: Stage-9 + Stage-0 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + 
alias: src + Select Operator + expressions: + expr: (key / 2) + type: double + expr: concat(value, '_con') + type: string + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: double + expr: _col1 + type: string + sort order: ++ + tag: -1 + value expressions: + expr: _col0 + type: double + expr: _col1 + type: string + Reduce Operator Tree: + Extract + Limit + Reduce Output Operator + key expressions: + expr: _col0 + type: double + expr: _col1 + type: string + sort order: ++ + tag: -1 + value expressions: + expr: _col0 + type: double + expr: _col1 + type: string + Reduce Operator Tree: + Extract + Limit + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.nzhang_ctas3 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-9 + Create Table Operator: + Create Table + columns: half_key double, conb string + if not exists: false + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + # buckets: -1 + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: nzhang_ctas3 + isExternal: false + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-0 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-4 + Block level merge + + Stage: Stage-6 + Block level merge + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@nzhang_ctas3 +PREHOOK: query: select * from nzhang_ctas3 +PREHOOK: type: QUERY +PREHOOK: Input: default@nzhang_ctas3 +#### A masked pattern was here #### +POSTHOOK: query: select * from nzhang_ctas3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@nzhang_ctas3 +#### A masked pattern was here #### +0.0 val_0_con +0.0 val_0_con +0.0 val_0_con +1.0 val_2_con +2.0 val_4_con +2.5 val_5_con +2.5 val_5_con +2.5 val_5_con +4.0 val_8_con +4.5 val_9_con +PREHOOK: query: describe formatted nzhang_CTAS3 +PREHOOK: type: DESCTABLE +POSTHOOK: query: describe formatted nzhang_CTAS3 +POSTHOOK: type: DESCTABLE +# col_name data_type comment + +half_key double None +conb string None + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Protect Mode: None +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 0 + rawDataSize 0 + totalSize 199 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe 
+InputFormat: org.apache.hadoop.hive.ql.io.RCFileInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2 +PREHOOK: type: CREATETABLE +POSTHOOK: query: explain create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2 +POSTHOOK: type: CREATETABLE +ABSTRACT SYNTAX TREE: + (TOK_CREATETABLE (TOK_TABNAME nzhang_ctas3) TOK_IFNOTEXISTS TOK_LIKETABLE (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 2)))) + +STAGE DEPENDENCIES: + +STAGE PLANS: +PREHOOK: query: create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2 +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2 +POSTHOOK: type: CREATETABLE +PREHOOK: query: select * from nzhang_ctas3 +PREHOOK: type: QUERY +PREHOOK: Input: default@nzhang_ctas3 +#### A masked pattern was here #### +POSTHOOK: query: select * from nzhang_ctas3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@nzhang_ctas3 +#### A masked pattern was here #### +0.0 val_0_con +0.0 val_0_con +0.0 val_0_con +1.0 val_2_con +2.0 val_4_con +2.5 val_5_con +2.5 val_5_con +2.5 val_5_con +4.0 val_8_con +4.5 val_9_con +PREHOOK: query: describe formatted nzhang_CTAS3 +PREHOOK: type: DESCTABLE +POSTHOOK: query: describe formatted nzhang_CTAS3 +POSTHOOK: type: DESCTABLE +# col_name data_type comment + +half_key double None +conb string None + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Protect Mode: None +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 0 + rawDataSize 0 + totalSize 199 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe +InputFormat: org.apache.hadoop.hive.ql.io.RCFileInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: explain create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: query: explain create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +ABSTRACT SYNTAX TREE: + (TOK_CREATETABLE (TOK_TABNAME nzhang_ctas4) TOK_LIKETABLE (TOK_TABLEROWFORMAT (TOK_SERDEPROPS (TOK_TABLEROWFORMATFIELD ','))) TOK_TBLTEXTFILE (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10)))) + +STAGE DEPENDENCIES: + 
Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-9 depends on stages: Stage-2, Stage-0 + Stage-3 depends on stages: Stage-9 + Stage-0 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + expr: _col1 + type: string + sort order: ++ + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Reduce Operator Tree: + Extract + Limit + Reduce Output Operator + key expressions: + expr: _col0 + type: string + expr: _col1 + type: string + sort order: ++ + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Reduce Operator Tree: + Extract + Limit + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas4 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-9 + Create Table Operator: + Create Table + columns: key string, value string + field delimiter: , + if not exists: false + input format: org.apache.hadoop.mapred.TextInputFormat + # buckets: -1 + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + name: nzhang_ctas4 + isExternal: false + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-0 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas4 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas4 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@nzhang_ctas4 +PREHOOK: query: select * from nzhang_ctas4 +PREHOOK: type: QUERY +PREHOOK: Input: default@nzhang_ctas4 +#### 
A masked pattern was here #### +POSTHOOK: query: select * from nzhang_ctas4 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@nzhang_ctas4 +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +PREHOOK: query: describe formatted nzhang_CTAS4 +PREHOOK: type: DESCTABLE +POSTHOOK: query: describe formatted nzhang_CTAS4 +POSTHOOK: type: DESCTABLE +# col_name data_type comment + +key string None +value string None + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Protect Mode: None +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 0 + rawDataSize 0 + totalSize 106 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + field.delim , + serialization.format , +PREHOOK: query: explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: query: explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +ABSTRACT SYNTAX TREE: + (TOK_CREATETABLE (TOK_TABNAME nzhang_ctas5) TOK_LIKETABLE (TOK_TABLEROWFORMAT (TOK_SERDEPROPS (TOK_TABLEROWFORMATFIELD ',') (TOK_TABLEROWFORMATLINES '\012'))) TOK_TBLTEXTFILE (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-9 depends on stages: Stage-2, Stage-0 + Stage-3 depends on stages: Stage-9 + Stage-0 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + GatherStats: false + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + Reduce Output Operator + key expressions: + expr: _col0 + type: string + expr: _col1 + type: string + sort order: ++ + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.types string:string +#### A masked pattern was here #### + name default.src + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct src { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.types string:string +#### A masked pattern was here #### + name default.src + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct src { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.src + name: default.src + Truncated Path -> Alias: + /src [src] + Needs Tagging: false + Reduce Operator Tree: + Extract + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + Limit + Statistics: + numRows: 10 dataSize: 2000 basicStatsState: COMPLETE colStatsState: NONE + Reduce Output Operator + key expressions: + expr: _col0 + type: string + expr: _col1 + type: string + sort order: ++ + Statistics: + numRows: 10 dataSize: 2000 basicStatsState: COMPLETE colStatsState: NONE + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Needs Tagging: false + Reduce Operator Tree: + Extract + Statistics: + numRows: 10 dataSize: 2000 basicStatsState: COMPLETE colStatsState: NONE + Limit + Statistics: + numRows: 10 dataSize: 2000 basicStatsState: COMPLETE colStatsState: NONE + File Output Operator + compressed: false + GlobalTableId: 1 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: + numRows: 10 dataSize: 2000 basicStatsState: COMPLETE colStatsState: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1 + columns.types string:string + field.delim , + line.delim + + name default.nzhang_ctas5 + serialization.format , + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas5 + TotalFiles: 1 + GatherStats: true + MultiFileSpray: false + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-9 + Create Table Operator: + Create Table + columns: key string, value string + field delimiter: , + if not exists: false + input format: org.apache.hadoop.mapred.TextInputFormat + line delimiter: + + # buckets: -1 + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + name: nzhang_ctas5 + isExternal: false + + Stage: Stage-3 + Stats-Aggr Operator +#### A masked pattern was here #### + + Stage: Stage-0 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A 
masked pattern was here #### + TableScan + GatherStats: false + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1 + columns.types string:string + field.delim , + line.delim + + name default.nzhang_ctas5 + serialization.format , + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas5 + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1 + columns.types string:string + field.delim , + line.delim + + name default.nzhang_ctas5 + serialization.format , + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1 + columns.types string:string + field.delim , + line.delim + + name default.nzhang_ctas5 + serialization.format , + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas5 + name: default.nzhang_ctas5 + Truncated Path -> Alias: +#### A masked pattern was here #### + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + GatherStats: false + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1 + columns.types string:string + field.delim , + line.delim + + name default.nzhang_ctas5 + serialization.format , + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas5 + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1 + columns.types string:string + field.delim , + line.delim + + name default.nzhang_ctas5 + serialization.format , + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1 + columns.types string:string + field.delim , + line.delim + + name default.nzhang_ctas5 + serialization.format , + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_ctas5 + name: default.nzhang_ctas5 + 
Truncated Path -> Alias: +#### A masked pattern was here #### + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@nzhang_ctas5 +PREHOOK: query: create table nzhang_ctas6 (key string, `to` string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table nzhang_ctas6 (key string, `to` string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@nzhang_ctas6 +PREHOOK: query: insert overwrite table nzhang_ctas6 select key, value from src tablesample (10 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@nzhang_ctas6 +POSTHOOK: query: insert overwrite table nzhang_ctas6 select key, value from src tablesample (10 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@nzhang_ctas6 +POSTHOOK: Lineage: nzhang_ctas6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_ctas6.to SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create table nzhang_ctas7 as select key, `to` from nzhang_ctas6 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@nzhang_ctas6 +POSTHOOK: query: create table nzhang_ctas7 as select key, `to` from nzhang_ctas6 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@nzhang_ctas6 +POSTHOOK: Output: default@nzhang_ctas7 +POSTHOOK: Lineage: nzhang_ctas6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_ctas6.to SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Index: ql/src/test/results/clientpositive/tez/custom_input_output_format.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/custom_input_output_format.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/custom_input_output_format.q.out (working copy) @@ -0,0 +1,95 @@ +PREHOOK: query: CREATE TABLE src1_rot13_iof(key STRING, value STRING) + STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13InputFormat' + OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13OutputFormat' +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE src1_rot13_iof(key STRING, value STRING) + STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13InputFormat' + OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13OutputFormat' +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@src1_rot13_iof +PREHOOK: query: DESCRIBE EXTENDED src1_rot13_iof +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED src1_rot13_iof +POSTHOOK: type: DESCTABLE +key string None +value string None + +#### A masked pattern was here #### +PREHOOK: query: SELECT * FROM src1 ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM src1 ORDER BY key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +#### A masked pattern was here #### + + + + 
+ val_165 + val_193 + val_265 + val_27 + val_409 + val_484 +128 +146 val_146 +150 val_150 +213 val_213 +224 +238 val_238 +255 val_255 +273 val_273 +278 val_278 +311 val_311 +369 +401 val_401 +406 val_406 +66 val_66 +98 val_98 +PREHOOK: query: INSERT OVERWRITE TABLE src1_rot13_iof SELECT * FROM src1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +PREHOOK: Output: default@src1_rot13_iof +POSTHOOK: query: INSERT OVERWRITE TABLE src1_rot13_iof SELECT * FROM src1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +POSTHOOK: Output: default@src1_rot13_iof +POSTHOOK: Lineage: src1_rot13_iof.key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src1_rot13_iof.value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT * FROM src1_rot13_iof ORDER BY key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@src1_rot13_iof +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM src1_rot13_iof ORDER BY key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1_rot13_iof +#### A masked pattern was here #### +POSTHOOK: Lineage: src1_rot13_iof.key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src1_rot13_iof.value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] + + + + + val_165 + val_193 + val_265 + val_27 + val_409 + val_484 +128 +146 val_146 +150 val_150 +213 val_213 +224 +238 val_238 +255 val_255 +273 val_273 +278 val_278 +311 val_311 +369 +401 val_401 +406 val_406 +66 val_66 +98 val_98 Index: ql/src/test/results/clientpositive/tez/disable_merge_for_bucketing.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/disable_merge_for_bucketing.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/disable_merge_for_bucketing.q.out (working copy) @@ -0,0 +1,499 @@ +PREHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@bucket2_1 +PREHOOK: query: explain extended +insert overwrite table bucket2_1 +select * from src +PREHOOK: type: QUERY +POSTHOOK: query: explain extended +insert overwrite table bucket2_1 +select * from src +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucket2_1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + GatherStats: false + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + Reduce Output Operator + sort order: + Map-reduce partition columns: + expr: UDFToInteger(_col0) + type: int + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Path -> Alias: +#### A masked pattern was here 
#### + Path -> Partition: +#### A masked pattern was here #### + Partition + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.types string:string +#### A masked pattern was here #### + name default.src + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct src { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.types string:string +#### A masked pattern was here #### + name default.src + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct src { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.src + name: default.src + Truncated Path -> Alias: + /src [src] + Needs Tagging: false + Reduce Operator Tree: + Extract + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + Select Operator + expressions: + expr: UDFToInteger(_col0) + type: int + expr: _col1 + type: string + outputColumnNames: _col0, _col1 + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + File Output Operator + compressed: false + GlobalTableId: 1 +#### A masked pattern was here #### + NumFilesPerFileSink: 2 + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 2 + bucket_field_name key + columns key,value + columns.types int:string +#### A masked pattern was here #### + name default.bucket2_1 + serialization.ddl struct bucket2_1 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket2_1 + TotalFiles: 2 + GatherStats: true + MultiFileSpray: true + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 2 + bucket_field_name key + columns key,value + columns.types int:string +#### A masked pattern was here #### + name default.bucket2_1 + serialization.ddl struct bucket2_1 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket2_1 +#### A masked pattern was here #### + + Stage: Stage-3 + Stats-Aggr Operator +#### A masked pattern was here #### + +PREHOOK: query: insert overwrite table bucket2_1 +select * from 
src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@bucket2_1 +POSTHOOK: query: insert overwrite table bucket2_1 +select * from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@bucket2_1 +POSTHOOK: Lineage: bucket2_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bucket2_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain +select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key +POSTHOOK: type: QUERY +POSTHOOK: Lineage: bucket2_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bucket2_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME bucket2_1) (TOK_TABLEBUCKETSAMPLE 1 2) s)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + s + TableScan + alias: s + Filter Operator + predicate: + expr: (((hash(key) & 2147483647) % 2) = 0) + type: boolean + Select Operator + expressions: + expr: key + type: int + expr: value + type: string + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: int + sort order: + + tag: -1 + value expressions: + expr: _col0 + type: int + expr: _col1 + type: string + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket2_1 +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket2_1 +#### A masked pattern was here #### +POSTHOOK: Lineage: bucket2_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bucket2_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 val_0 +0 val_0 +0 val_0 +2 val_2 +4 val_4 +8 val_8 +10 val_10 +12 val_12 +12 val_12 +18 val_18 +18 val_18 +20 val_20 +24 val_24 +24 val_24 +26 val_26 +26 val_26 +28 val_28 +30 val_30 +34 val_34 +42 val_42 +42 val_42 +44 val_44 +54 val_54 +58 val_58 +58 val_58 +64 val_64 +66 val_66 +70 val_70 +70 val_70 +70 val_70 +72 val_72 +72 val_72 +74 val_74 +76 val_76 +76 val_76 +78 val_78 +80 val_80 +82 val_82 +84 val_84 +84 val_84 +86 val_86 +90 val_90 +90 val_90 +90 val_90 +92 val_92 +96 val_96 +98 val_98 +98 val_98 +100 val_100 +100 val_100 +104 val_104 +104 val_104 +114 val_114 +116 val_116 +118 val_118 +118 val_118 +120 val_120 +120 val_120 +126 val_126 +128 val_128 +128 val_128 +128 val_128 +134 val_134 +134 val_134 +136 val_136 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +146 val_146 +146 val_146 +150 val_150 +152 val_152 +152 val_152 +156 
val_156 +158 val_158 +160 val_160 +162 val_162 +164 val_164 +164 val_164 +166 val_166 +168 val_168 +170 val_170 +172 val_172 +172 val_172 +174 val_174 +174 val_174 +176 val_176 +176 val_176 +178 val_178 +180 val_180 +186 val_186 +190 val_190 +192 val_192 +194 val_194 +196 val_196 +200 val_200 +200 val_200 +202 val_202 +208 val_208 +208 val_208 +208 val_208 +214 val_214 +216 val_216 +216 val_216 +218 val_218 +222 val_222 +224 val_224 +224 val_224 +226 val_226 +228 val_228 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +238 val_238 +238 val_238 +242 val_242 +242 val_242 +244 val_244 +248 val_248 +252 val_252 +256 val_256 +256 val_256 +258 val_258 +260 val_260 +262 val_262 +266 val_266 +272 val_272 +272 val_272 +274 val_274 +278 val_278 +278 val_278 +280 val_280 +280 val_280 +282 val_282 +282 val_282 +284 val_284 +286 val_286 +288 val_288 +288 val_288 +292 val_292 +296 val_296 +298 val_298 +298 val_298 +298 val_298 +302 val_302 +306 val_306 +308 val_308 +310 val_310 +316 val_316 +316 val_316 +316 val_316 +318 val_318 +318 val_318 +318 val_318 +322 val_322 +322 val_322 +332 val_332 +336 val_336 +338 val_338 +342 val_342 +342 val_342 +344 val_344 +344 val_344 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +356 val_356 +360 val_360 +362 val_362 +364 val_364 +366 val_366 +368 val_368 +374 val_374 +378 val_378 +382 val_382 +382 val_382 +384 val_384 +384 val_384 +384 val_384 +386 val_386 +392 val_392 +394 val_394 +396 val_396 +396 val_396 +396 val_396 +400 val_400 +402 val_402 +404 val_404 +404 val_404 +406 val_406 +406 val_406 +406 val_406 +406 val_406 +414 val_414 +414 val_414 +418 val_418 +424 val_424 +424 val_424 +430 val_430 +430 val_430 +430 val_430 +432 val_432 +436 val_436 +438 val_438 +438 val_438 +438 val_438 +444 val_444 +446 val_446 +448 val_448 +452 val_452 +454 val_454 +454 val_454 +454 val_454 +458 val_458 +458 val_458 +460 val_460 +462 val_462 +462 val_462 +466 val_466 +466 val_466 +466 val_466 +468 val_468 +468 val_468 +468 val_468 +468 val_468 +470 val_470 +472 val_472 +478 val_478 +478 val_478 +480 val_480 +480 val_480 +480 val_480 +482 val_482 +484 val_484 +490 val_490 +492 val_492 +492 val_492 +494 val_494 +496 val_496 +498 val_498 +498 val_498 +498 val_498 Index: ql/src/test/results/clientpositive/tez/enforce_order.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/enforce_order.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/enforce_order.q.out (working copy) @@ -0,0 +1,84 @@ +PREHOOK: query: drop table table_asc +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table table_asc +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table table_desc +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table table_desc +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table table_asc(key string, value string) clustered by (key) sorted by (key ASC) into 1 BUCKETS +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table table_asc(key string, value string) clustered by (key) sorted by (key ASC) into 1 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@table_asc +PREHOOK: query: create table table_desc(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table table_desc(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@table_desc +PREHOOK: query: insert overwrite table table_asc select key, value 
from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@table_asc +POSTHOOK: query: insert overwrite table table_asc select key, value from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@table_asc +POSTHOOK: Lineage: table_asc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: table_asc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table table_desc select key, value from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@table_desc +POSTHOOK: query: insert overwrite table table_desc select key, value from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@table_desc +POSTHOOK: Lineage: table_asc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: table_asc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: table_desc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: table_desc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from table_asc limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@table_asc +#### A masked pattern was here #### +POSTHOOK: query: select * from table_asc limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@table_asc +#### A masked pattern was here #### +POSTHOOK: Lineage: table_asc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: table_asc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: table_desc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: table_desc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +PREHOOK: query: select * from table_desc limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@table_desc +#### A masked pattern was here #### +POSTHOOK: query: select * from table_desc limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@table_desc +#### A masked pattern was here #### +POSTHOOK: Lineage: table_asc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: table_asc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: table_desc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: table_desc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +98 val_98 +98 val_98 +97 val_97 +97 val_97 +96 val_96 +95 val_95 +95 val_95 +92 val_92 +90 val_90 +90 val_90 Index: ql/src/test/results/clientpositive/tez/fileformat_mix.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/fileformat_mix.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/fileformat_mix.q.out (working copy) @@ -0,0 +1,573 @@ +PREHOOK: query: create table fileformat_mix_test (src int, value string) partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table fileformat_mix_test (src int, value string) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@fileformat_mix_test 
+PREHOOK: query: alter table fileformat_mix_test set fileformat Sequencefile +PREHOOK: type: ALTERTABLE_FILEFORMAT +PREHOOK: Input: default@fileformat_mix_test +PREHOOK: Output: default@fileformat_mix_test +POSTHOOK: query: alter table fileformat_mix_test set fileformat Sequencefile +POSTHOOK: type: ALTERTABLE_FILEFORMAT +POSTHOOK: Input: default@fileformat_mix_test +POSTHOOK: Output: default@fileformat_mix_test +PREHOOK: query: insert overwrite table fileformat_mix_test partition (ds='1') +select key, value from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@fileformat_mix_test@ds=1 +POSTHOOK: query: insert overwrite table fileformat_mix_test partition (ds='1') +select key, value from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@fileformat_mix_test@ds=1 +POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).src EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table fileformat_mix_test add partition (ds='2') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: default@fileformat_mix_test +POSTHOOK: query: alter table fileformat_mix_test add partition (ds='2') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: default@fileformat_mix_test +POSTHOOK: Output: default@fileformat_mix_test@ds=2 +POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).src EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table fileformat_mix_test set fileformat rcfile +PREHOOK: type: ALTERTABLE_FILEFORMAT +PREHOOK: Input: default@fileformat_mix_test +PREHOOK: Output: default@fileformat_mix_test +POSTHOOK: query: alter table fileformat_mix_test set fileformat rcfile +POSTHOOK: type: ALTERTABLE_FILEFORMAT +POSTHOOK: Input: default@fileformat_mix_test +POSTHOOK: Output: default@fileformat_mix_test +POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).src EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select count(1) from fileformat_mix_test +PREHOOK: type: QUERY +PREHOOK: Input: default@fileformat_mix_test +PREHOOK: Input: default@fileformat_mix_test@ds=1 +PREHOOK: Input: default@fileformat_mix_test@ds=2 +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from fileformat_mix_test +POSTHOOK: type: QUERY +POSTHOOK: Input: default@fileformat_mix_test +POSTHOOK: Input: default@fileformat_mix_test@ds=1 +POSTHOOK: Input: default@fileformat_mix_test@ds=2 +#### A masked pattern was here #### +POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).src EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +500 +PREHOOK: query: select src from fileformat_mix_test +PREHOOK: type: QUERY +PREHOOK: Input: default@fileformat_mix_test +PREHOOK: Input: default@fileformat_mix_test@ds=1 +PREHOOK: Input: default@fileformat_mix_test@ds=2 +#### A masked pattern was here #### +POSTHOOK: query: select src from fileformat_mix_test +POSTHOOK: type: 
QUERY +POSTHOOK: Input: default@fileformat_mix_test +POSTHOOK: Input: default@fileformat_mix_test@ds=1 +POSTHOOK: Input: default@fileformat_mix_test@ds=2 +#### A masked pattern was here #### +POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).src EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +238 +86 +311 +27 +165 +409 +255 +278 +98 +484 +265 +193 +401 +150 +273 +224 +369 +66 +128 +213 +146 +406 +429 +374 +152 +469 +145 +495 +37 +327 +281 +277 +209 +15 +82 +403 +166 +417 +430 +252 +292 +219 +287 +153 +193 +338 +446 +459 +394 +237 +482 +174 +413 +494 +207 +199 +466 +208 +174 +399 +396 +247 +417 +489 +162 +377 +397 +309 +365 +266 +439 +342 +367 +325 +167 +195 +475 +17 +113 +155 +203 +339 +0 +455 +128 +311 +316 +57 +302 +205 +149 +438 +345 +129 +170 +20 +489 +157 +378 +221 +92 +111 +47 +72 +4 +280 +35 +427 +277 +208 +356 +399 +169 +382 +498 +125 +386 +437 +469 +192 +286 +187 +176 +54 +459 +51 +138 +103 +239 +213 +216 +430 +278 +176 +289 +221 +65 +318 +332 +311 +275 +137 +241 +83 +333 +180 +284 +12 +230 +181 +67 +260 +404 +384 +489 +353 +373 +272 +138 +217 +84 +348 +466 +58 +8 +411 +230 +208 +348 +24 +463 +431 +179 +172 +42 +129 +158 +119 +496 +0 +322 +197 +468 +393 +454 +100 +298 +199 +191 +418 +96 +26 +165 +327 +230 +205 +120 +131 +51 +404 +43 +436 +156 +469 +468 +308 +95 +196 +288 +481 +457 +98 +282 +197 +187 +318 +318 +409 +470 +137 +369 +316 +169 +413 +85 +77 +0 +490 +87 +364 +179 +118 +134 +395 +282 +138 +238 +419 +15 +118 +72 +90 +307 +19 +435 +10 +277 +273 +306 +224 +309 +389 +327 +242 +369 +392 +272 +331 +401 +242 +452 +177 +226 +5 +497 +402 +396 +317 +395 +58 +35 +336 +95 +11 +168 +34 +229 +233 +143 +472 +322 +498 +160 +195 +42 +321 +430 +119 +489 +458 +78 +76 +41 +223 +492 +149 +449 +218 +228 +138 +453 +30 +209 +64 +468 +76 +74 +342 +69 +230 +33 +368 +103 +296 +113 +216 +367 +344 +167 +274 +219 +239 +485 +116 +223 +256 +263 +70 +487 +480 +401 +288 +191 +5 +244 +438 +128 +467 +432 +202 +316 +229 +469 +463 +280 +2 +35 +283 +331 +235 +80 +44 +193 +321 +335 +104 +466 +366 +175 +403 +483 +53 +105 +257 +406 +409 +190 +406 +401 +114 +258 +90 +203 +262 +348 +424 +12 +396 +201 +217 +164 +431 +454 +478 +298 +125 +431 +164 +424 +187 +382 +5 +70 +397 +480 +291 +24 +351 +255 +104 +70 +163 +438 +119 +414 +200 +491 +237 +439 +360 +248 +479 +305 +417 +199 +444 +120 +429 +169 +443 +323 +325 +277 +230 +478 +178 +468 +310 +317 +333 +493 +460 +207 +249 +265 +480 +83 +136 +353 +172 +214 +462 +233 +406 +133 +175 +189 +454 +375 +401 +421 +407 +384 +256 +26 +134 +67 +384 +379 +18 +462 +492 +100 +298 +9 +341 +498 +146 +458 +362 +186 +285 +348 +167 +18 +273 +183 +281 +344 +97 +469 +315 +84 +28 +37 +448 +152 +348 +307 +194 +414 +477 +222 +126 +90 +169 +403 +400 +200 +97 Index: ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out (working copy) @@ -0,0 +1,360 @@ +PREHOOK: query: CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@filter_join_breaktask +PREHOOK: query: INSERT OVERWRITE TABLE 
filter_join_breaktask PARTITION(ds='2008-04-08') +SELECT key, value from src1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +PREHOOK: Output: default@filter_join_breaktask@ds=2008-04-08 +POSTHOOK: query: INSERT OVERWRITE TABLE filter_join_breaktask PARTITION(ds='2008-04-08') +SELECT key, value from src1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +POSTHOOK: Output: default@filter_join_breaktask@ds=2008-04-08 +POSTHOOK: Lineage: filter_join_breaktask PARTITION(ds=2008-04-08).key EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: filter_join_breaktask PARTITION(ds=2008-04-08).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: EXPLAIN EXTENDED +SELECT f.key, g.value +FROM filter_join_breaktask f JOIN filter_join_breaktask m ON( f.key = m.key AND f.ds='2008-04-08' AND m.ds='2008-04-08' AND f.key is not null) +JOIN filter_join_breaktask g ON(g.value = m.value AND g.ds='2008-04-08' AND m.ds='2008-04-08' AND m.value is not null AND m.value !='') +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN EXTENDED +SELECT f.key, g.value +FROM filter_join_breaktask f JOIN filter_join_breaktask m ON( f.key = m.key AND f.ds='2008-04-08' AND m.ds='2008-04-08' AND f.key is not null) +JOIN filter_join_breaktask g ON(g.value = m.value AND g.ds='2008-04-08' AND m.ds='2008-04-08' AND m.value is not null AND m.value !='') +POSTHOOK: type: QUERY +POSTHOOK: Lineage: filter_join_breaktask PARTITION(ds=2008-04-08).key EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: filter_join_breaktask PARTITION(ds=2008-04-08).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_TABREF (TOK_TABNAME filter_join_breaktask) f) (TOK_TABREF (TOK_TABNAME filter_join_breaktask) m) (AND (AND (AND (= (. (TOK_TABLE_OR_COL f) key) (. (TOK_TABLE_OR_COL m) key)) (= (. (TOK_TABLE_OR_COL f) ds) '2008-04-08')) (= (. (TOK_TABLE_OR_COL m) ds) '2008-04-08')) (TOK_FUNCTION TOK_ISNOTNULL (. (TOK_TABLE_OR_COL f) key)))) (TOK_TABREF (TOK_TABNAME filter_join_breaktask) g) (AND (AND (AND (AND (= (. (TOK_TABLE_OR_COL g) value) (. (TOK_TABLE_OR_COL m) value)) (= (. (TOK_TABLE_OR_COL g) ds) '2008-04-08')) (= (. (TOK_TABLE_OR_COL m) ds) '2008-04-08')) (TOK_FUNCTION TOK_ISNOTNULL (. (TOK_TABLE_OR_COL m) value))) (!= (. (TOK_TABLE_OR_COL m) value) '')))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL f) key)) (TOK_SELEXPR (. 
(TOK_TABLE_OR_COL g) value))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + f + TableScan + alias: f + Statistics: + numRows: 59 dataSize: 236 basicStatsState: COMPLETE colStatsState: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: + expr: key is not null + type: boolean + Statistics: + numRows: 30 dataSize: 120 basicStatsState: COMPLETE colStatsState: NONE + Reduce Output Operator + key expressions: + expr: key + type: int + sort order: + + Map-reduce partition columns: + expr: key + type: int + Statistics: + numRows: 30 dataSize: 120 basicStatsState: COMPLETE colStatsState: NONE + tag: 0 + value expressions: + expr: key + type: int + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.types int:string +#### A masked pattern was here #### + name default.filter_join_breaktask + numFiles 1 + numRows 0 + partition_columns ds + rawDataSize 0 + serialization.ddl struct filter_join_breaktask { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 236 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns key,value + columns.types int:string +#### A masked pattern was here #### + name default.filter_join_breaktask + partition_columns ds + serialization.ddl struct filter_join_breaktask { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.filter_join_breaktask + name: default.filter_join_breaktask + Truncated Path -> Alias: + /filter_join_breaktask/ds=2008-04-08 [f] + Alias -> Map Operator Tree: + m + TableScan + alias: m + Statistics: + numRows: 2 dataSize: 236 basicStatsState: COMPLETE colStatsState: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: + expr: ((key is not null and value is not null) and (value <> '')) + type: boolean + Statistics: + numRows: 1 dataSize: 118 basicStatsState: COMPLETE colStatsState: NONE + Reduce Output Operator + key expressions: + expr: key + type: int + sort order: + + Map-reduce partition columns: + expr: key + type: int + Statistics: + numRows: 1 dataSize: 118 basicStatsState: COMPLETE colStatsState: NONE + tag: 1 + value expressions: + expr: value + type: string + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.types int:string +#### A masked pattern was here #### + name default.filter_join_breaktask + numFiles 1 + numRows 0 + partition_columns ds + rawDataSize 0 + 
serialization.ddl struct filter_join_breaktask { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 236 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns key,value + columns.types int:string +#### A masked pattern was here #### + name default.filter_join_breaktask + partition_columns ds + serialization.ddl struct filter_join_breaktask { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.filter_join_breaktask + name: default.filter_join_breaktask + Truncated Path -> Alias: + /filter_join_breaktask/ds=2008-04-08 [m] + Needs Tagging: true + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {VALUE._col0} + 1 {VALUE._col1} + handleSkewJoin: false + outputColumnNames: _col0, _col6 + Statistics: + numRows: 33 dataSize: 132 basicStatsState: COMPLETE colStatsState: NONE + Reduce Output Operator + key expressions: + expr: _col6 + type: string + sort order: + + Map-reduce partition columns: + expr: _col6 + type: string + Statistics: + numRows: 33 dataSize: 132 basicStatsState: COMPLETE colStatsState: NONE + tag: 0 + value expressions: + expr: _col0 + type: int + Alias -> Map Operator Tree: + g + TableScan + alias: g + Statistics: + numRows: 2 dataSize: 236 basicStatsState: COMPLETE colStatsState: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: + expr: (value <> '') + type: boolean + Statistics: + numRows: 2 dataSize: 236 basicStatsState: COMPLETE colStatsState: NONE + Reduce Output Operator + key expressions: + expr: value + type: string + sort order: + + Map-reduce partition columns: + expr: value + type: string + Statistics: + numRows: 2 dataSize: 236 basicStatsState: COMPLETE colStatsState: NONE + tag: 1 + value expressions: + expr: value + type: string + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.types int:string +#### A masked pattern was here #### + name default.filter_join_breaktask + numFiles 1 + numRows 0 + partition_columns ds + rawDataSize 0 + serialization.ddl struct filter_join_breaktask { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 236 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns key,value + columns.types int:string +#### A masked pattern was here #### + name default.filter_join_breaktask + partition_columns ds + serialization.ddl struct filter_join_breaktask { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### 
A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.filter_join_breaktask + name: default.filter_join_breaktask + Truncated Path -> Alias: + /filter_join_breaktask/ds=2008-04-08 [g] + Needs Tagging: true + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {VALUE._col0} + 1 {VALUE._col1} + handleSkewJoin: false + outputColumnNames: _col0, _col11 + Statistics: + numRows: 36 dataSize: 145 basicStatsState: COMPLETE colStatsState: NONE + Select Operator + expressions: + expr: _col0 + type: int + expr: _col11 + type: string + outputColumnNames: _col0, _col1 + Statistics: + numRows: 36 dataSize: 145 basicStatsState: COMPLETE colStatsState: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: + numRows: 36 dataSize: 145 basicStatsState: COMPLETE colStatsState: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1 + columns.types int:string + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: SELECT f.key, g.value +FROM filter_join_breaktask f JOIN filter_join_breaktask m ON( f.key = m.key AND f.ds='2008-04-08' AND m.ds='2008-04-08' AND f.key is not null) +JOIN filter_join_breaktask g ON(g.value = m.value AND g.ds='2008-04-08' AND m.ds='2008-04-08' AND m.value is not null AND m.value !='') +PREHOOK: type: QUERY +PREHOOK: Input: default@filter_join_breaktask +PREHOOK: Input: default@filter_join_breaktask@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: SELECT f.key, g.value +FROM filter_join_breaktask f JOIN filter_join_breaktask m ON( f.key = m.key AND f.ds='2008-04-08' AND m.ds='2008-04-08' AND f.key is not null) +JOIN filter_join_breaktask g ON(g.value = m.value AND g.ds='2008-04-08' AND m.ds='2008-04-08' AND m.value is not null AND m.value !='') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@filter_join_breaktask +POSTHOOK: Input: default@filter_join_breaktask@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: Lineage: filter_join_breaktask PARTITION(ds=2008-04-08).key EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: filter_join_breaktask PARTITION(ds=2008-04-08).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +146 val_146 +150 val_150 +213 val_213 +238 val_238 +255 val_255 +273 val_273 +278 val_278 +311 val_311 +401 val_401 +406 val_406 +66 val_66 +98 val_98 Index: ql/src/test/results/clientpositive/tez/filter_join_breaktask2.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/filter_join_breaktask2.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/filter_join_breaktask2.q.out (working copy) @@ -0,0 +1,931 @@ +PREHOOK: query: create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string) +partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table T1(c1 string, c2 string, c3 string, c4 string, c5 
string, c6 string, c7 string) +partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@T1 +PREHOOK: query: create table T2(c1 string, c2 string, c3 string, c0 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string) partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table T2(c1 string, c2 string, c3 string, c0 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@T2 +PREHOOK: query: create table T3 (c0 bigint, c1 bigint, c2 int) partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table T3 (c0 bigint, c1 bigint, c2 int) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@T3 +PREHOOK: query: create table T4 (c0 bigint, c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string, c26 string, c27 string, c28 string, c29 string, c30 string, c31 string, c32 string, c33 string, c34 string, c35 string, c36 string, c37 string, c38 string, c39 string, c40 string, c41 string, c42 string, c43 string, c44 string, c45 string, c46 string, c47 string, c48 string, c49 string, c50 string, c51 string, c52 string, c53 string, c54 string, c55 string, c56 string, c57 string, c58 string, c59 string, c60 string, c61 string, c62 string, c63 string, c64 string, c65 string, c66 string, c67 bigint, c68 string, c69 string, c70 bigint, c71 bigint, c72 bigint, c73 string, c74 string, c75 string, c76 string, c77 string, c78 string, c79 string, c80 string, c81 bigint, c82 bigint, c83 bigint) partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table T4 (c0 bigint, c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string, c26 string, c27 string, c28 string, c29 string, c30 string, c31 string, c32 string, c33 string, c34 string, c35 string, c36 string, c37 string, c38 string, c39 string, c40 string, c41 string, c42 string, c43 string, c44 string, c45 string, c46 string, c47 string, c48 string, c49 string, c50 string, c51 string, c52 string, c53 string, c54 string, c55 string, c56 string, c57 string, c58 string, c59 string, c60 string, c61 string, c62 string, c63 string, c64 string, c65 string, c66 string, c67 bigint, c68 string, c69 string, c70 bigint, c71 bigint, c72 bigint, c73 string, c74 string, c75 string, c76 string, c77 string, c78 string, c79 string, c80 string, c81 bigint, c82 bigint, c83 bigint) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@T4 +PREHOOK: query: insert overwrite table T1 partition (ds='2010-04-17') select '5', '1', '1', '1', 0, 0,4 from src tablesample (1 rows) +PREHOOK: 
type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@t1@ds=2010-04-17 +POSTHOOK: query: insert overwrite table T1 partition (ds='2010-04-17') select '5', '1', '1', '1', 0, 0,4 from src tablesample (1 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@t1@ds=2010-04-17 +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c7 SIMPLE [] +PREHOOK: query: insert overwrite table T2 partition(ds='2010-04-17') select '5','name', NULL, '2', 'kavin',NULL, '9', 'c', '8', '0', '0', '7', '1','2', '0', '3','2', NULL, '1', NULL, '3','2','0','0','5','10' from src tablesample (1 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@t2@ds=2010-04-17 +POSTHOOK: query: insert overwrite table T2 partition(ds='2010-04-17') select '5','name', NULL, '2', 'kavin',NULL, '9', 'c', '8', '0', '0', '7', '1','2', '0', '3','2', NULL, '1', NULL, '3','2','0','0','5','10' from src tablesample (1 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@t2@ds=2010-04-17 +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c0 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c10 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c11 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c12 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c13 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c14 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c15 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c16 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c17 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c18 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c19 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c20 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c21 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c22 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c23 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c24 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c25 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c8 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c9 SIMPLE [] +PREHOOK: query: insert overwrite table T3 partition (ds='2010-04-17') select 4,5,0 from src tablesample (1 
rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@t3@ds=2010-04-17 +POSTHOOK: query: insert overwrite table T3 partition (ds='2010-04-17') select 4,5,0 from src tablesample (1 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@t3@ds=2010-04-17 +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c0 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c10 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c11 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c12 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c13 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c14 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c15 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c16 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c17 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c18 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c19 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c20 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c21 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c22 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c23 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c24 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c25 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c8 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c9 SIMPLE [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c0 EXPRESSION [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c1 EXPRESSION [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c2 SIMPLE [] +PREHOOK: query: insert overwrite table T4 partition(ds='2010-04-17') +select 4,'1','1','8','4','5','1','0','9','U','2','2', '0','2','1','1','J','C','A','U', '2','s', '2',NULL, NULL, NULL,NULL, NULL, NULL,'1','j', 'S', '6',NULL,'1', '2', 'J', 'g', '1', 'e', '2', '1', '2', 'U', 'P', 'p', '3', '0', '0', '0', '1', '1', '1', '0', '0', '0', '6', '2', 'j',NULL, NULL, NULL,NULL,NULL, NULL, '5',NULL, 'j', 'j', 2, 2, 1, '2', '2', '1', '1', '1', '1', '1', '1', 1, 1, 32,NULL from src limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@t4@ds=2010-04-17 +POSTHOOK: query: insert overwrite table T4 partition(ds='2010-04-17') +select 4,'1','1','8','4','5','1','0','9','U','2','2', '0','2','1','1','J','C','A','U', '2','s', '2',NULL, NULL, NULL,NULL, NULL, NULL,'1','j', 'S', '6',NULL,'1', '2', 'J', 'g', '1', 'e', '2', '1', '2', 'U', 'P', 'p', '3', '0', '0', '0', '1', '1', '1', '0', '0', '0', '6', '2', 'j',NULL, NULL, NULL,NULL,NULL, NULL, '5',NULL, 'j', 'j', 2, 2, 1, '2', '2', '1', '1', '1', 
'1', '1', '1', 1, 1, 32,NULL from src limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@t4@ds=2010-04-17 +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c0 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c10 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c11 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c12 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c13 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c14 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c15 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c16 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c17 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c18 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c19 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c20 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c21 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c22 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c23 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c24 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c25 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c8 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c9 SIMPLE [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c0 EXPRESSION [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c1 EXPRESSION [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c0 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c10 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c11 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c12 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c13 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c14 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c15 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c16 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c17 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c18 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c19 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c20 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c21 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c22 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c23 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c24 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c25 SIMPLE [] +POSTHOOK: 
Lineage: t4 PARTITION(ds=2010-04-17).c26 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c27 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c28 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c29 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c30 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c31 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c32 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c33 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c34 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c35 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c36 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c37 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c38 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c39 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c40 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c41 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c42 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c43 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c44 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c45 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c46 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c47 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c48 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c49 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c50 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c51 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c52 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c53 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c54 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c55 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c56 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c57 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c58 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c59 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c60 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c61 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c62 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c63 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c64 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c65 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c66 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c67 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c68 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c69 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c70 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c71 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c72 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c73 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c74 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c75 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c76 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c77 SIMPLE [] +POSTHOOK: Lineage: 
t4 PARTITION(ds=2010-04-17).c78 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c79 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c8 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c80 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c81 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c82 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c83 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c9 SIMPLE [] +PREHOOK: query: select * from T2 +PREHOOK: type: QUERY +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@ds=2010-04-17 +#### A masked pattern was here #### +POSTHOOK: query: select * from T2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@ds=2010-04-17 +#### A masked pattern was here #### +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c0 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c10 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c11 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c12 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c13 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c14 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c15 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c16 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c17 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c18 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c19 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c20 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c21 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c22 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c23 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c24 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c25 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c8 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c9 SIMPLE [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c0 EXPRESSION [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c1 EXPRESSION [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c0 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c10 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c11 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c12 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c13 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c14 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c15 SIMPLE [] +POSTHOOK: Lineage: 
t4 PARTITION(ds=2010-04-17).c16 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c17 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c18 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c19 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c20 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c21 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c22 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c23 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c24 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c25 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c26 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c27 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c28 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c29 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c30 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c31 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c32 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c33 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c34 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c35 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c36 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c37 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c38 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c39 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c40 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c41 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c42 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c43 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c44 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c45 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c46 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c47 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c48 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c49 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c50 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c51 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c52 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c53 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c54 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c55 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c56 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c57 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c58 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c59 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c60 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c61 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c62 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c63 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c64 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c65 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c66 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c67 EXPRESSION [] +POSTHOOK: Lineage: t4 
PARTITION(ds=2010-04-17).c68 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c69 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c70 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c71 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c72 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c73 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c74 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c75 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c76 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c77 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c78 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c79 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c8 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c80 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c81 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c82 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c83 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c9 SIMPLE [] +5 name NULL 2 kavin NULL 9 c 8 0 0 7 1 2 0 3 2 NULL 1 NULL 3 2 0 0 5 10 2010-04-17 +PREHOOK: query: select * from T1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@ds=2010-04-17 +#### A masked pattern was here #### +POSTHOOK: query: select * from T1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@ds=2010-04-17 +#### A masked pattern was here #### +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c0 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c10 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c11 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c12 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c13 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c14 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c15 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c16 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c17 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c18 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c19 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c20 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c21 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c22 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c23 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c24 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c25 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c8 SIMPLE 
[] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c9 SIMPLE [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c0 EXPRESSION [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c1 EXPRESSION [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c0 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c10 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c11 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c12 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c13 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c14 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c15 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c16 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c17 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c18 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c19 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c20 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c21 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c22 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c23 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c24 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c25 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c26 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c27 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c28 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c29 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c30 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c31 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c32 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c33 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c34 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c35 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c36 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c37 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c38 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c39 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c40 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c41 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c42 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c43 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c44 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c45 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c46 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c47 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c48 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c49 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c50 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c51 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c52 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c53 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c54 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c55 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c56 SIMPLE [] +POSTHOOK: 
Lineage: t4 PARTITION(ds=2010-04-17).c57 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c58 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c59 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c60 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c61 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c62 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c63 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c64 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c65 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c66 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c67 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c68 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c69 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c70 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c71 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c72 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c73 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c74 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c75 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c76 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c77 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c78 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c79 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c8 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c80 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c81 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c82 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c83 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c9 SIMPLE [] +5 1 1 1 0 0 4 2010-04-17 +PREHOOK: query: select * from T3 +PREHOOK: type: QUERY +PREHOOK: Input: default@t3 +PREHOOK: Input: default@t3@ds=2010-04-17 +#### A masked pattern was here #### +POSTHOOK: query: select * from T3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t3 +POSTHOOK: Input: default@t3@ds=2010-04-17 +#### A masked pattern was here #### +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c0 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c10 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c11 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c12 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c13 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c14 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c15 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c16 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c17 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c18 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c19 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t2 
PARTITION(ds=2010-04-17).c20 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c21 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c22 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c23 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c24 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c25 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c8 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c9 SIMPLE [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c0 EXPRESSION [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c1 EXPRESSION [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c0 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c10 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c11 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c12 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c13 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c14 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c15 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c16 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c17 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c18 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c19 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c20 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c21 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c22 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c23 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c24 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c25 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c26 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c27 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c28 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c29 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c30 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c31 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c32 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c33 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c34 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c35 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c36 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c37 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c38 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c39 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c40 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c41 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c42 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c43 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c44 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c45 SIMPLE [] +POSTHOOK: Lineage: t4 
PARTITION(ds=2010-04-17).c46 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c47 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c48 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c49 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c50 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c51 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c52 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c53 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c54 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c55 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c56 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c57 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c58 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c59 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c60 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c61 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c62 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c63 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c64 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c65 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c66 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c67 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c68 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c69 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c70 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c71 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c72 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c73 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c74 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c75 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c76 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c77 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c78 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c79 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c8 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c80 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c81 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c82 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c83 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c9 SIMPLE [] +4 5 0 2010-04-17 +PREHOOK: query: select * from T4 +PREHOOK: type: QUERY +PREHOOK: Input: default@t4 +PREHOOK: Input: default@t4@ds=2010-04-17 +#### A masked pattern was here #### +POSTHOOK: query: select * from T4 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t4 +POSTHOOK: Input: default@t4@ds=2010-04-17 +#### A masked pattern was here #### +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c0 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c1 
SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c10 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c11 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c12 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c13 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c14 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c15 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c16 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c17 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c18 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c19 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c20 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c21 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c22 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c23 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c24 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c25 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c8 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c9 SIMPLE [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c0 EXPRESSION [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c1 EXPRESSION [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c0 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c10 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c11 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c12 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c13 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c14 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c15 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c16 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c17 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c18 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c19 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c20 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c21 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c22 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c23 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c24 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c25 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c26 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c27 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c28 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c29 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c30 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c31 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c32 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c33 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c34 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c35 SIMPLE [] +POSTHOOK: 
Lineage: t4 PARTITION(ds=2010-04-17).c36 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c37 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c38 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c39 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c40 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c41 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c42 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c43 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c44 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c45 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c46 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c47 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c48 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c49 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c50 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c51 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c52 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c53 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c54 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c55 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c56 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c57 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c58 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c59 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c60 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c61 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c62 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c63 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c64 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c65 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c66 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c67 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c68 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c69 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c70 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c71 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c72 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c73 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c74 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c75 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c76 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c77 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c78 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c79 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c8 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c80 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c81 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c82 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c83 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c9 SIMPLE [] +4 1 1 8 4 5 1 0 9 U 2 2 0 2 1 1 J C A U 2 s 2 NULL NULL NULL NULL NULL NULL 1 j S 6 NULL 1 2 J g 1 e 2 1 2 U P p 3 0 0 0 1 1 1 0 0 0 6 2 j NULL NULL NULL NULL NULL NULL 5 NULL NULL j 2 2 1 2 2 1 
1 1 1 1 1 1 1 32 NULL 2010-04-17 +PREHOOK: query: SELECT a.c1 as a_c1, b.c1 b_c1, d.c0 as d_c0 +FROM T1 a JOIN T2 b + ON (a.c1 = b.c1 AND a.ds='2010-04-17' AND b.ds='2010-04-17') + JOIN T3 c + ON (a.c1 = c.c1 AND a.ds='2010-04-17' AND c.ds='2010-04-17') + JOIN T4 d + ON (c.c0 = d.c0 AND c.ds='2010-04-17' AND d.ds='2010-04-17') +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@ds=2010-04-17 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@ds=2010-04-17 +PREHOOK: Input: default@t3 +PREHOOK: Input: default@t3@ds=2010-04-17 +PREHOOK: Input: default@t4 +PREHOOK: Input: default@t4@ds=2010-04-17 +#### A masked pattern was here #### +POSTHOOK: query: SELECT a.c1 as a_c1, b.c1 b_c1, d.c0 as d_c0 +FROM T1 a JOIN T2 b + ON (a.c1 = b.c1 AND a.ds='2010-04-17' AND b.ds='2010-04-17') + JOIN T3 c + ON (a.c1 = c.c1 AND a.ds='2010-04-17' AND c.ds='2010-04-17') + JOIN T4 d + ON (c.c0 = d.c0 AND c.ds='2010-04-17' AND d.ds='2010-04-17') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@ds=2010-04-17 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@ds=2010-04-17 +POSTHOOK: Input: default@t3 +POSTHOOK: Input: default@t3@ds=2010-04-17 +POSTHOOK: Input: default@t4 +POSTHOOK: Input: default@t4@ds=2010-04-17 +#### A masked pattern was here #### +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c0 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c10 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c11 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c12 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c13 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c14 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c15 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c16 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c17 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c18 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c19 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c20 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c21 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c22 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c23 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c24 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c25 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c8 SIMPLE [] +POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c9 SIMPLE [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c0 EXPRESSION [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c1 EXPRESSION [] +POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: 
t4 PARTITION(ds=2010-04-17).c0 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c1 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c10 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c11 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c12 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c13 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c14 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c15 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c16 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c17 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c18 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c19 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c2 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c20 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c21 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c22 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c23 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c24 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c25 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c26 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c27 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c28 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c29 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c3 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c30 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c31 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c32 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c33 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c34 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c35 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c36 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c37 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c38 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c39 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c4 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c40 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c41 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c42 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c43 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c44 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c45 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c46 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c47 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c48 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c49 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c5 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c50 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c51 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c52 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c53 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c54 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c55 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c56 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c57 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c58 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c59 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c6 SIMPLE [] +POSTHOOK: Lineage: t4 
PARTITION(ds=2010-04-17).c60 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c61 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c62 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c63 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c64 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c65 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c66 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c67 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c68 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c69 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c7 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c70 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c71 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c72 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c73 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c74 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c75 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c76 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c77 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c78 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c79 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c8 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c80 SIMPLE [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c81 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c82 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c83 EXPRESSION [] +POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c9 SIMPLE [] +5 5 4 Index: ql/src/test/results/clientpositive/tez/groupby1.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/groupby1.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/groupby1.q.out (working copy) @@ -0,0 +1,487 @@ +PREHOOK: query: CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@dest_g1 +PREHOOK: query: EXPLAIN +FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_g1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_GROUPBY (. 
(TOK_TABLE_OR_COL src) key)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: key, value + Reduce Output Operator + key expressions: + expr: key + type: string + sort order: + + Map-reduce partition columns: + expr: rand() + type: double + tag: -1 + value expressions: + expr: substr(value, 5) + type: string + Reduce Operator Tree: + Group By Operator + aggregations: + expr: sum(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: partial1 + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col1 + type: double + Reduce Operator Tree: + Group By Operator + aggregations: + expr: sum(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: final + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: UDFToInteger(_col0) + type: int + expr: _col1 + type: double + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest_g1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest_g1 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest_g1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest_g1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest_g1 +POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key +POSTHOOK: 
type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dest_g1 +POSTHOOK: Lineage: dest_g1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_g1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest_g1.* FROM dest_g1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest_g1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT dest_g1.* FROM dest_g1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest_g1 +#### A masked pattern was here #### +POSTHOOK: Lineage: dest_g1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_g1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 0.0 +10 10.0 +100 200.0 +103 206.0 +104 208.0 +105 105.0 +11 11.0 +111 111.0 +113 226.0 +114 114.0 +116 116.0 +118 236.0 +119 357.0 +12 24.0 +120 240.0 +125 250.0 +126 126.0 +128 384.0 +129 258.0 +131 131.0 +133 133.0 +134 268.0 +136 136.0 +137 274.0 +138 552.0 +143 143.0 +145 145.0 +146 292.0 +149 298.0 +15 30.0 +150 150.0 +152 304.0 +153 153.0 +155 155.0 +156 156.0 +157 157.0 +158 158.0 +160 160.0 +162 162.0 +163 163.0 +164 328.0 +165 330.0 +166 166.0 +167 501.0 +168 168.0 +169 676.0 +17 17.0 +170 170.0 +172 344.0 +174 348.0 +175 350.0 +176 352.0 +177 177.0 +178 178.0 +179 358.0 +18 36.0 +180 180.0 +181 181.0 +183 183.0 +186 186.0 +187 561.0 +189 189.0 +19 19.0 +190 190.0 +191 382.0 +192 192.0 +193 579.0 +194 194.0 +195 390.0 +196 196.0 +197 394.0 +199 597.0 +2 2.0 +20 20.0 +200 400.0 +201 201.0 +202 202.0 +203 406.0 +205 410.0 +207 414.0 +208 624.0 +209 418.0 +213 426.0 +214 214.0 +216 432.0 +217 434.0 +218 218.0 +219 438.0 +221 442.0 +222 222.0 +223 446.0 +224 448.0 +226 226.0 +228 228.0 +229 458.0 +230 1150.0 +233 466.0 +235 235.0 +237 474.0 +238 476.0 +239 478.0 +24 48.0 +241 241.0 +242 484.0 +244 244.0 +247 247.0 +248 248.0 +249 249.0 +252 252.0 +255 510.0 +256 512.0 +257 257.0 +258 258.0 +26 52.0 +260 260.0 +262 262.0 +263 263.0 +265 530.0 +266 266.0 +27 27.0 +272 544.0 +273 819.0 +274 274.0 +275 275.0 +277 1108.0 +278 556.0 +28 28.0 +280 560.0 +281 562.0 +282 564.0 +283 283.0 +284 284.0 +285 285.0 +286 286.0 +287 287.0 +288 576.0 +289 289.0 +291 291.0 +292 292.0 +296 296.0 +298 894.0 +30 30.0 +302 302.0 +305 305.0 +306 306.0 +307 614.0 +308 308.0 +309 618.0 +310 310.0 +311 933.0 +315 315.0 +316 948.0 +317 634.0 +318 954.0 +321 642.0 +322 644.0 +323 323.0 +325 650.0 +327 981.0 +33 33.0 +331 662.0 +332 332.0 +333 666.0 +335 335.0 +336 336.0 +338 338.0 +339 339.0 +34 34.0 +341 341.0 +342 684.0 +344 688.0 +345 345.0 +348 1740.0 +35 105.0 +351 351.0 +353 706.0 +356 356.0 +360 360.0 +362 362.0 +364 364.0 +365 365.0 +366 366.0 +367 734.0 +368 368.0 +369 1107.0 +37 74.0 +373 373.0 +374 374.0 +375 375.0 +377 377.0 +378 378.0 +379 379.0 +382 764.0 +384 1152.0 +386 386.0 +389 389.0 +392 392.0 +393 393.0 +394 394.0 +395 790.0 +396 1188.0 +397 794.0 +399 798.0 +4 4.0 +400 400.0 +401 2005.0 +402 402.0 +403 1209.0 +404 808.0 +406 1624.0 +407 407.0 +409 1227.0 +41 41.0 +411 411.0 +413 826.0 +414 828.0 +417 1251.0 +418 418.0 +419 419.0 +42 84.0 +421 421.0 +424 848.0 +427 427.0 +429 858.0 +43 43.0 +430 1290.0 +431 1293.0 +432 432.0 +435 435.0 +436 436.0 +437 437.0 +438 1314.0 +439 878.0 +44 44.0 +443 443.0 +444 444.0 +446 446.0 +448 448.0 +449 449.0 +452 452.0 +453 453.0 +454 1362.0 +455 455.0 +457 457.0 +458 916.0 +459 918.0 +460 460.0 +462 924.0 +463 926.0 +466 1398.0 +467 467.0 +468 1872.0 +469 2345.0 +47 47.0 
+470 470.0 +472 472.0 +475 475.0 +477 477.0 +478 956.0 +479 479.0 +480 1440.0 +481 481.0 +482 482.0 +483 483.0 +484 484.0 +485 485.0 +487 487.0 +489 1956.0 +490 490.0 +491 491.0 +492 984.0 +493 493.0 +494 494.0 +495 495.0 +496 496.0 +497 497.0 +498 1494.0 +5 15.0 +51 102.0 +53 53.0 +54 54.0 +57 57.0 +58 116.0 +64 64.0 +65 65.0 +66 66.0 +67 134.0 +69 69.0 +70 210.0 +72 144.0 +74 74.0 +76 152.0 +77 77.0 +78 78.0 +8 8.0 +80 80.0 +82 82.0 +83 166.0 +84 168.0 +85 85.0 +86 86.0 +87 87.0 +9 9.0 +90 270.0 +92 92.0 +95 190.0 +96 96.0 +97 194.0 +98 196.0 Index: ql/src/test/results/clientpositive/tez/groupby2.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/groupby2.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/groupby2.q.out (working copy) @@ -0,0 +1,174 @@ +PREHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@dest_g2 +PREHOOK: query: EXPLAIN +FROM src +INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +FROM src +INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_g2))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))) (TOK_GROUPBY (TOK_FUNCTION substr (. 
(TOK_TABLE_OR_COL src) key) 1 1)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: key, value + Reduce Output Operator + key expressions: + expr: substr(key, 1, 1) + type: string + expr: substr(value, 5) + type: string + sort order: ++ + Map-reduce partition columns: + expr: substr(key, 1, 1) + type: string + tag: -1 + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(DISTINCT KEY._col1:0._col0) + expr: sum(KEY._col1:0._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: complete + outputColumnNames: _col0, _col1, _col2 + Select Operator + expressions: + expr: _col0 + type: string + expr: UDFToInteger(_col1) + type: int + expr: concat(_col0, _col2) + type: string + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest_g2 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest_g2 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest_g2 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest_g2 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: FROM src +INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest_g2 +POSTHOOK: query: FROM src +INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dest_g2 +POSTHOOK: Lineage: dest_g2.c1 
EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_g2.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_g2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: SELECT dest_g2.* FROM dest_g2 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest_g2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT dest_g2.* FROM dest_g2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest_g2 +#### A masked pattern was here #### +POSTHOOK: Lineage: dest_g2.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_g2.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_g2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +0 1 00.0 +1 71 116414.0 +2 69 225571.0 +3 62 332004.0 +4 74 452763.0 +5 6 5397.0 +6 5 6398.0 +7 6 7735.0 +8 8 8762.0 +9 7 91047.0 Index: ql/src/test/results/clientpositive/tez/groupby3.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/groupby3.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/groupby3.q.out (working copy) @@ -0,0 +1,262 @@ +PREHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@dest1 +PREHOOK: query: EXPLAIN +FROM src +INSERT OVERWRITE TABLE dest1 SELECT + sum(substr(src.value,5)), + avg(substr(src.value,5)), + avg(DISTINCT substr(src.value,5)), + max(substr(src.value,5)), + min(substr(src.value,5)), + std(substr(src.value,5)), + stddev_samp(substr(src.value,5)), + variance(substr(src.value,5)), + var_samp(substr(src.value,5)) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +FROM src +INSERT OVERWRITE TABLE dest1 SELECT + sum(substr(src.value,5)), + avg(substr(src.value,5)), + avg(DISTINCT substr(src.value,5)), + max(substr(src.value,5)), + min(substr(src.value,5)), + std(substr(src.value,5)), + stddev_samp(substr(src.value,5)), + variance(substr(src.value,5)), + var_samp(substr(src.value,5)) +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION max (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION min (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION std (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION stddev_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION variance (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION var_samp (TOK_FUNCTION substr (. 
(TOK_TABLE_OR_COL src) value) 5)))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: value + type: string + outputColumnNames: value + Reduce Output Operator + key expressions: + expr: substr(value, 5) + type: string + sort order: + + Map-reduce partition columns: + expr: substr(value, 5) + type: string + tag: -1 + Reduce Operator Tree: + Group By Operator + aggregations: + expr: sum(KEY._col0:0._col0) + expr: avg(KEY._col0:0._col0) + expr: avg(DISTINCT KEY._col0:0._col0) + expr: max(KEY._col0:0._col0) + expr: min(KEY._col0:0._col0) + expr: std(KEY._col0:0._col0) + expr: stddev_samp(KEY._col0:0._col0) + expr: variance(KEY._col0:0._col0) + expr: var_samp(KEY._col0:0._col0) + bucketGroup: false + mode: partial1 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 + Reduce Output Operator + sort order: + tag: -1 + value expressions: + expr: _col0 + type: double + expr: _col1 + type: struct + expr: _col2 + type: struct + expr: _col3 + type: string + expr: _col4 + type: string + expr: _col5 + type: struct + expr: _col6 + type: struct + expr: _col7 + type: struct + expr: _col8 + type: struct + Reduce Operator Tree: + Group By Operator + aggregations: + expr: sum(VALUE._col0) + expr: avg(VALUE._col1) + expr: avg(VALUE._col2) + expr: max(VALUE._col3) + expr: min(VALUE._col4) + expr: std(VALUE._col5) + expr: stddev_samp(VALUE._col6) + expr: variance(VALUE._col7) + expr: var_samp(VALUE._col8) + bucketGroup: false + mode: final + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 + Select Operator + expressions: + expr: _col0 + type: double + expr: _col1 + type: double + expr: _col2 + type: double + expr: UDFToDouble(_col3) + type: double + expr: UDFToDouble(_col4) + type: double + expr: _col5 + type: double + expr: _col6 + type: double + expr: _col7 + type: double + expr: _col8 + type: double + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: 
default.dest1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: FROM src +INSERT OVERWRITE TABLE dest1 SELECT + sum(substr(src.value,5)), + avg(substr(src.value,5)), + avg(DISTINCT substr(src.value,5)), + max(substr(src.value,5)), + min(substr(src.value,5)), + std(substr(src.value,5)), + stddev_samp(substr(src.value,5)), + variance(substr(src.value,5)), + var_samp(substr(src.value,5)) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest1 +POSTHOOK: query: FROM src +INSERT OVERWRITE TABLE dest1 SELECT + sum(substr(src.value,5)), + avg(substr(src.value,5)), + avg(DISTINCT substr(src.value,5)), + max(substr(src.value,5)), + min(substr(src.value,5)), + std(substr(src.value,5)), + stddev_samp(substr(src.value,5)), + variance(substr(src.value,5)), + var_samp(substr(src.value,5)) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dest1 +POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1.* FROM dest1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest1 +#### A masked pattern was here #### +POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), 
] +130091.0 260.182 256.10355987055016 98.0 0.0 142.92680950752379 143.06995106518903 20428.07287599999 20469.010897795582 Index: ql/src/test/results/clientpositive/tez/having.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/having.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/having.q.out (working copy) @@ -0,0 +1,1274 @@ +PREHOOK: query: EXPLAIN SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL value)) c)) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_HAVING (> (TOK_TABLE_OR_COL c) 3)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: key, value + Group By Operator + aggregations: + expr: count(value) + bucketGroup: false + keys: + expr: key + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col1 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Filter Operator + predicate: + expr: (_col1 > 3) + type: boolean + Select Operator + expressions: + expr: _col1 + type: bigint + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +4 +4 +5 +4 +5 +5 +4 +4 +5 +4 +PREHOOK: query: EXPLAIN SELECT key, max(value) AS c FROM src GROUP BY key HAVING key != 302 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key, max(value) AS c FROM src GROUP BY key HAVING key != 302 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION max (TOK_TABLE_OR_COL value)) c)) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_HAVING (!= (TOK_TABLE_OR_COL key) 302)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Filter Operator + predicate: + expr: (key <> 302) + type: boolean + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: key, value + Group By Operator 
+ aggregations: + expr: max(value) + bucketGroup: false + keys: + expr: key + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col1 + type: string + Reduce Operator Tree: + Group By Operator + aggregations: + expr: max(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: SELECT key, max(value) AS c FROM src GROUP BY key HAVING key != 302 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT key, max(value) AS c FROM src GROUP BY key HAVING key != 302 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 val_0 +10 val_10 +100 val_100 +103 val_103 +104 val_104 +105 val_105 +11 val_11 +111 val_111 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +119 val_119 +12 val_12 +120 val_120 +125 val_125 +126 val_126 +128 val_128 +129 val_129 +131 val_131 +133 val_133 +134 val_134 +136 val_136 +137 val_137 +138 val_138 +143 val_143 +145 val_145 +146 val_146 +149 val_149 +15 val_15 +150 val_150 +152 val_152 +153 val_153 +155 val_155 +156 val_156 +157 val_157 +158 val_158 +160 val_160 +162 val_162 +163 val_163 +164 val_164 +165 val_165 +166 val_166 +167 val_167 +168 val_168 +169 val_169 +17 val_17 +170 val_170 +172 val_172 +174 val_174 +175 val_175 +176 val_176 +177 val_177 +178 val_178 +179 val_179 +18 val_18 +180 val_180 +181 val_181 +183 val_183 +186 val_186 +187 val_187 +189 val_189 +19 val_19 +190 val_190 +191 val_191 +192 val_192 +193 val_193 +194 val_194 +195 val_195 +196 val_196 +197 val_197 +199 val_199 +2 val_2 +20 val_20 +200 val_200 +201 val_201 +202 val_202 +203 val_203 +205 val_205 +207 val_207 +208 val_208 +209 val_209 +213 val_213 +214 val_214 +216 val_216 +217 val_217 +218 val_218 +219 val_219 +221 val_221 +222 val_222 +223 val_223 +224 val_224 +226 val_226 +228 val_228 +229 val_229 +230 val_230 +233 val_233 +235 val_235 +237 val_237 +238 val_238 +239 val_239 +24 val_24 +241 val_241 +242 val_242 +244 val_244 +247 val_247 +248 val_248 +249 val_249 +252 val_252 +255 val_255 +256 val_256 +257 val_257 +258 val_258 +26 val_26 +260 val_260 +262 val_262 +263 val_263 +265 val_265 +266 val_266 +27 val_27 +272 val_272 +273 val_273 +274 val_274 +275 val_275 +277 val_277 +278 val_278 +28 val_28 +280 val_280 +281 val_281 +282 val_282 +283 val_283 +284 val_284 +285 val_285 +286 val_286 +287 val_287 +288 val_288 +289 val_289 +291 val_291 +292 val_292 +296 val_296 +298 val_298 +30 val_30 +305 val_305 +306 val_306 +307 val_307 +308 val_308 +309 val_309 +310 val_310 +311 val_311 +315 val_315 +316 val_316 +317 val_317 +318 val_318 +321 val_321 +322 val_322 +323 val_323 +325 val_325 +327 val_327 +33 val_33 +331 val_331 +332 val_332 +333 val_333 +335 val_335 +336 val_336 +338 val_338 +339 val_339 +34 val_34 +341 val_341 +342 val_342 +344 val_344 +345 val_345 +348 val_348 +35 val_35 +351 
val_351 +353 val_353 +356 val_356 +360 val_360 +362 val_362 +364 val_364 +365 val_365 +366 val_366 +367 val_367 +368 val_368 +369 val_369 +37 val_37 +373 val_373 +374 val_374 +375 val_375 +377 val_377 +378 val_378 +379 val_379 +382 val_382 +384 val_384 +386 val_386 +389 val_389 +392 val_392 +393 val_393 +394 val_394 +395 val_395 +396 val_396 +397 val_397 +399 val_399 +4 val_4 +400 val_400 +401 val_401 +402 val_402 +403 val_403 +404 val_404 +406 val_406 +407 val_407 +409 val_409 +41 val_41 +411 val_411 +413 val_413 +414 val_414 +417 val_417 +418 val_418 +419 val_419 +42 val_42 +421 val_421 +424 val_424 +427 val_427 +429 val_429 +43 val_43 +430 val_430 +431 val_431 +432 val_432 +435 val_435 +436 val_436 +437 val_437 +438 val_438 +439 val_439 +44 val_44 +443 val_443 +444 val_444 +446 val_446 +448 val_448 +449 val_449 +452 val_452 +453 val_453 +454 val_454 +455 val_455 +457 val_457 +458 val_458 +459 val_459 +460 val_460 +462 val_462 +463 val_463 +466 val_466 +467 val_467 +468 val_468 +469 val_469 +47 val_47 +470 val_470 +472 val_472 +475 val_475 +477 val_477 +478 val_478 +479 val_479 +480 val_480 +481 val_481 +482 val_482 +483 val_483 +484 val_484 +485 val_485 +487 val_487 +489 val_489 +490 val_490 +491 val_491 +492 val_492 +493 val_493 +494 val_494 +495 val_495 +496 val_496 +497 val_497 +498 val_498 +5 val_5 +51 val_51 +53 val_53 +54 val_54 +57 val_57 +58 val_58 +64 val_64 +65 val_65 +66 val_66 +67 val_67 +69 val_69 +70 val_70 +72 val_72 +74 val_74 +76 val_76 +77 val_77 +78 val_78 +8 val_8 +80 val_80 +82 val_82 +83 val_83 +84 val_84 +85 val_85 +86 val_86 +87 val_87 +9 val_9 +90 val_90 +92 val_92 +95 val_95 +96 val_96 +97 val_97 +98 val_98 +PREHOOK: query: EXPLAIN SELECT key FROM src GROUP BY key HAVING max(value) > "val_255" +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key FROM src GROUP BY key HAVING max(value) > "val_255" +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key))) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_HAVING (> (TOK_FUNCTION max (TOK_TABLE_OR_COL value)) "val_255")))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: key, value + Group By Operator + aggregations: + expr: max(value) + bucketGroup: false + keys: + expr: key + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col1 + type: string + Reduce Operator Tree: + Group By Operator + aggregations: + expr: max(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Filter Operator + predicate: + expr: (_col1 > 'val_255') + type: boolean + Select Operator + expressions: + expr: _col0 + type: string + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: SELECT key FROM 
src GROUP BY key HAVING max(value) > "val_255" +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT key FROM src GROUP BY key HAVING max(value) > "val_255" +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +256 +257 +258 +26 +260 +262 +263 +265 +266 +27 +272 +273 +274 +275 +277 +278 +28 +280 +281 +282 +283 +284 +285 +286 +287 +288 +289 +291 +292 +296 +298 +30 +302 +305 +306 +307 +308 +309 +310 +311 +315 +316 +317 +318 +321 +322 +323 +325 +327 +33 +331 +332 +333 +335 +336 +338 +339 +34 +341 +342 +344 +345 +348 +35 +351 +353 +356 +360 +362 +364 +365 +366 +367 +368 +369 +37 +373 +374 +375 +377 +378 +379 +382 +384 +386 +389 +392 +393 +394 +395 +396 +397 +399 +4 +400 +401 +402 +403 +404 +406 +407 +409 +41 +411 +413 +414 +417 +418 +419 +42 +421 +424 +427 +429 +43 +430 +431 +432 +435 +436 +437 +438 +439 +44 +443 +444 +446 +448 +449 +452 +453 +454 +455 +457 +458 +459 +460 +462 +463 +466 +467 +468 +469 +47 +470 +472 +475 +477 +478 +479 +480 +481 +482 +483 +484 +485 +487 +489 +490 +491 +492 +493 +494 +495 +496 +497 +498 +5 +51 +53 +54 +57 +58 +64 +65 +66 +67 +69 +70 +72 +74 +76 +77 +78 +8 +80 +82 +83 +84 +85 +86 +87 +9 +90 +92 +95 +96 +97 +98 +PREHOOK: query: EXPLAIN SELECT key FROM src where key > 300 GROUP BY key HAVING max(value) > "val_255" +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key FROM src where key > 300 GROUP BY key HAVING max(value) > "val_255" +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key))) (TOK_WHERE (> (TOK_TABLE_OR_COL key) 300)) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_HAVING (> (TOK_FUNCTION max (TOK_TABLE_OR_COL value)) "val_255")))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Filter Operator + predicate: + expr: (key > 300) + type: boolean + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: key, value + Group By Operator + aggregations: + expr: max(value) + bucketGroup: false + keys: + expr: key + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col1 + type: string + Reduce Operator Tree: + Group By Operator + aggregations: + expr: max(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Filter Operator + predicate: + expr: (_col1 > 'val_255') + type: boolean + Select Operator + expressions: + expr: _col0 + type: string + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: SELECT key FROM src where key > 300 GROUP BY key HAVING max(value) > "val_255" +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT key FROM src where key > 300 GROUP BY key HAVING max(value) > "val_255" +POSTHOOK: type: QUERY 
+POSTHOOK: Input: default@src +#### A masked pattern was here #### +302 +305 +306 +307 +308 +309 +310 +311 +315 +316 +317 +318 +321 +322 +323 +325 +327 +331 +332 +333 +335 +336 +338 +339 +341 +342 +344 +345 +348 +351 +353 +356 +360 +362 +364 +365 +366 +367 +368 +369 +373 +374 +375 +377 +378 +379 +382 +384 +386 +389 +392 +393 +394 +395 +396 +397 +399 +400 +401 +402 +403 +404 +406 +407 +409 +411 +413 +414 +417 +418 +419 +421 +424 +427 +429 +430 +431 +432 +435 +436 +437 +438 +439 +443 +444 +446 +448 +449 +452 +453 +454 +455 +457 +458 +459 +460 +462 +463 +466 +467 +468 +469 +470 +472 +475 +477 +478 +479 +480 +481 +482 +483 +484 +485 +487 +489 +490 +491 +492 +493 +494 +495 +496 +497 +498 +PREHOOK: query: EXPLAIN SELECT key, max(value) FROM src GROUP BY key HAVING max(value) > "val_255" +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key, max(value) FROM src GROUP BY key HAVING max(value) > "val_255" +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION max (TOK_TABLE_OR_COL value)))) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_HAVING (> (TOK_FUNCTION max (TOK_TABLE_OR_COL value)) "val_255")))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: key, value + Group By Operator + aggregations: + expr: max(value) + bucketGroup: false + keys: + expr: key + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col1 + type: string + Reduce Operator Tree: + Group By Operator + aggregations: + expr: max(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Filter Operator + predicate: + expr: (_col1 > 'val_255') + type: boolean + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: SELECT key, max(value) FROM src GROUP BY key HAVING max(value) > "val_255" +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT key, max(value) FROM src GROUP BY key HAVING max(value) > "val_255" +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +256 val_256 +257 val_257 +258 val_258 +26 val_26 +260 val_260 +262 val_262 +263 val_263 +265 val_265 +266 val_266 +27 val_27 +272 val_272 +273 val_273 +274 val_274 +275 val_275 +277 val_277 +278 val_278 +28 val_28 +280 val_280 +281 val_281 +282 val_282 +283 val_283 +284 val_284 +285 val_285 +286 val_286 +287 val_287 +288 val_288 +289 val_289 +291 val_291 +292 val_292 +296 val_296 +298 val_298 +30 val_30 +302 val_302 +305 val_305 +306 val_306 +307 val_307 +308 val_308 +309 val_309 +310 val_310 +311 val_311 +315 val_315 
+316 val_316 +317 val_317 +318 val_318 +321 val_321 +322 val_322 +323 val_323 +325 val_325 +327 val_327 +33 val_33 +331 val_331 +332 val_332 +333 val_333 +335 val_335 +336 val_336 +338 val_338 +339 val_339 +34 val_34 +341 val_341 +342 val_342 +344 val_344 +345 val_345 +348 val_348 +35 val_35 +351 val_351 +353 val_353 +356 val_356 +360 val_360 +362 val_362 +364 val_364 +365 val_365 +366 val_366 +367 val_367 +368 val_368 +369 val_369 +37 val_37 +373 val_373 +374 val_374 +375 val_375 +377 val_377 +378 val_378 +379 val_379 +382 val_382 +384 val_384 +386 val_386 +389 val_389 +392 val_392 +393 val_393 +394 val_394 +395 val_395 +396 val_396 +397 val_397 +399 val_399 +4 val_4 +400 val_400 +401 val_401 +402 val_402 +403 val_403 +404 val_404 +406 val_406 +407 val_407 +409 val_409 +41 val_41 +411 val_411 +413 val_413 +414 val_414 +417 val_417 +418 val_418 +419 val_419 +42 val_42 +421 val_421 +424 val_424 +427 val_427 +429 val_429 +43 val_43 +430 val_430 +431 val_431 +432 val_432 +435 val_435 +436 val_436 +437 val_437 +438 val_438 +439 val_439 +44 val_44 +443 val_443 +444 val_444 +446 val_446 +448 val_448 +449 val_449 +452 val_452 +453 val_453 +454 val_454 +455 val_455 +457 val_457 +458 val_458 +459 val_459 +460 val_460 +462 val_462 +463 val_463 +466 val_466 +467 val_467 +468 val_468 +469 val_469 +47 val_47 +470 val_470 +472 val_472 +475 val_475 +477 val_477 +478 val_478 +479 val_479 +480 val_480 +481 val_481 +482 val_482 +483 val_483 +484 val_484 +485 val_485 +487 val_487 +489 val_489 +490 val_490 +491 val_491 +492 val_492 +493 val_493 +494 val_494 +495 val_495 +496 val_496 +497 val_497 +498 val_498 +5 val_5 +51 val_51 +53 val_53 +54 val_54 +57 val_57 +58 val_58 +64 val_64 +65 val_65 +66 val_66 +67 val_67 +69 val_69 +70 val_70 +72 val_72 +74 val_74 +76 val_76 +77 val_77 +78 val_78 +8 val_8 +80 val_80 +82 val_82 +83 val_83 +84 val_84 +85 val_85 +86 val_86 +87 val_87 +9 val_9 +90 val_90 +92 val_92 +95 val_95 +96 val_96 +97 val_97 +98 val_98 Index: ql/src/test/results/clientpositive/tez/insert1.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/insert1.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/insert1.q.out (working copy) @@ -0,0 +1,779 @@ +PREHOOK: query: create table insert1(key int, value string) stored as textfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table insert1(key int, value string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@insert1 +PREHOOK: query: create table insert2(key int, value string) stored as textfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table insert2(key int, value string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@insert2 +PREHOOK: query: insert overwrite table insert1 select a.key, a.value from insert2 a WHERE (a.key=-1) +PREHOOK: type: QUERY +PREHOOK: Input: default@insert2 +PREHOOK: Output: default@insert1 +POSTHOOK: query: insert overwrite table insert1 select a.key, a.value from insert2 a WHERE (a.key=-1) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert2 +POSTHOOK: Output: default@insert1 +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: explain insert into table insert1 select a.key, a.value from insert2 a WHERE (a.key=-1) +PREHOOK: type: QUERY +POSTHOOK: query: explain insert into table insert1 select a.key, a.value from 
insert2 a WHERE (a.key=-1) +POSTHOOK: type: QUERY +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME insert2) a)) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME insert1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value))) (TOK_WHERE (= (. (TOK_TABLE_OR_COL a) key) (- 1))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + a + TableScan + alias: a + Filter Operator + predicate: + expr: (key = (- 1)) + type: boolean + Select Operator + expressions: + expr: key + type: int + expr: value + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert1 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: explain insert into table INSERT1 select a.key, a.value from insert2 a WHERE (a.key=-1) +PREHOOK: type: QUERY +POSTHOOK: query: explain insert into table INSERT1 select a.key, a.value from insert2 a WHERE (a.key=-1) +POSTHOOK: type: QUERY +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME insert2) a)) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME INSERT1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. 
(TOK_TABLE_OR_COL a) value))) (TOK_WHERE (= (. (TOK_TABLE_OR_COL a) key) (- 1))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + a + TableScan + alias: a + Filter Operator + predicate: + expr: (key = (- 1)) + type: boolean + Select Operator + expressions: + expr: key + type: int + expr: value + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert1 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: -- HIVE-3465 +create database x +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- HIVE-3465 +create database x +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: create table x.insert1(key int, value string) stored as textfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table x.insert1(key int, value string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: x@insert1 +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: explain insert into table x.INSERT1 select a.key, a.value from insert2 a WHERE (a.key=-1) +PREHOOK: type: QUERY +POSTHOOK: query: explain insert into table x.INSERT1 select a.key, a.value from insert2 a WHERE (a.key=-1) +POSTHOOK: type: QUERY +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, 
type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME insert2) a)) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME x INSERT1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value))) (TOK_WHERE (= (. (TOK_TABLE_OR_COL a) key) (- 1))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + a + TableScan + alias: a + Filter Operator + predicate: + expr: (key = (- 1)) + type: boolean + Select Operator + expressions: + expr: key + type: int + expr: value + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: x.insert1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: x.insert1 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: x.insert1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: x.insert1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: explain insert into table default.INSERT1 select a.key, a.value from insert2 a WHERE (a.key=-1) +PREHOOK: type: QUERY +POSTHOOK: query: explain insert into table default.INSERT1 select a.key, a.value from insert2 a WHERE (a.key=-1) +POSTHOOK: type: QUERY +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME insert2) a)) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME default INSERT1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value))) (TOK_WHERE (= (. 
(TOK_TABLE_OR_COL a) key) (- 1))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + a + TableScan + alias: a + Filter Operator + predicate: + expr: (key = (- 1)) + type: boolean + Select Operator + expressions: + expr: key + type: int + expr: value + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert1 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: explain +from insert2 +insert into table insert1 select * where key < 10 +insert overwrite table x.insert1 select * where key > 10 and key < 20 +PREHOOK: type: QUERY +POSTHOOK: query: explain +from insert2 +insert into table insert1 select * where key < 10 +insert overwrite table x.insert1 select * where key > 10 and key < 20 +POSTHOOK: type: QUERY +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME insert2))) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME insert1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (< (TOK_TABLE_OR_COL key) 10))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME x insert1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (and (> (TOK_TABLE_OR_COL key) 10) (< (TOK_TABLE_OR_COL key) 20))))) + +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-9 depends on stages: Stage-2 , consists of Stage-6, Stage-5, Stage-7 + Stage-6 + Stage-3 depends on stages: Stage-6, Stage-5, Stage-8, Stage-12, Stage-11, Stage-14 + 
Stage-0 depends on stages: Stage-3 + Stage-4 depends on stages: Stage-0 + Stage-1 depends on stages: Stage-3 + Stage-10 depends on stages: Stage-1 + Stage-5 + Stage-7 + Stage-8 depends on stages: Stage-7 + Stage-15 depends on stages: Stage-2 , consists of Stage-12, Stage-11, Stage-13 + Stage-12 + Stage-11 + Stage-13 + Stage-14 depends on stages: Stage-13 + +STAGE PLANS: + Stage: Stage-2 + Tez + Alias -> Map Operator Tree: + insert2 + TableScan + alias: insert2 + Filter Operator + predicate: + expr: (key < 10) + type: boolean + Select Operator + expressions: + expr: key + type: int + expr: value + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert1 + Filter Operator + predicate: + expr: ((key > 10) and (key < 20)) + type: boolean + Select Operator + expressions: + expr: key + type: int + expr: value + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 2 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: x.insert1 + + Stage: Stage-9 + Conditional Operator + + Stage: Stage-6 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-3 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert1 + + Stage: Stage-4 + Stats-Aggr Operator + + Stage: Stage-1 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: x.insert1 + + Stage: Stage-10 + Stats-Aggr Operator + + Stage: Stage-5 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert1 + + Stage: Stage-7 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert1 + + Stage: Stage-8 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-15 + Conditional Operator + + Stage: Stage-12 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-11 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: x.insert1 + + Stage: Stage-13 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: x.insert1 + + Stage: Stage-14 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: -- HIVE-3676 +CREATE DATABASE db2 +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: -- HIVE-3676 +CREATE DATABASE db2 +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: USE db2 +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE db2 +POSTHOOK: type: SWITCHDATABASE +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE result(col1 STRING) +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE result(col1 STRING) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: db2@result +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: INSERT OVERWRITE TABLE result SELECT 'db2_insert1' FROM default.src LIMIT 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: db2@result +POSTHOOK: query: INSERT OVERWRITE TABLE result SELECT 'db2_insert1' FROM default.src LIMIT 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: db2@result +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: result.col1 SIMPLE [] +PREHOOK: query: INSERT INTO TABLE result SELECT 'db2_insert2' FROM default.src LIMIT 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: db2@result +POSTHOOK: query: INSERT INTO TABLE result SELECT 'db2_insert2' FROM default.src LIMIT 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: db2@result +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: result.col1 SIMPLE [] +POSTHOOK: Lineage: result.col1 SIMPLE [] +PREHOOK: query: SELECT * FROM result order by col1 +PREHOOK: type: QUERY +PREHOOK: Input: db2@result +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM result order by col1 +POSTHOOK: type: QUERY +POSTHOOK: Input: db2@result +#### A masked pattern was here #### +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: result.col1 SIMPLE [] +POSTHOOK: Lineage: result.col1 SIMPLE [] +db2_insert1 +db2_insert2 
+PREHOOK: query: USE default +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE default +POSTHOOK: type: SWITCHDATABASE +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: result.col1 SIMPLE [] +POSTHOOK: Lineage: result.col1 SIMPLE [] +PREHOOK: query: CREATE DATABASE db1 +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: CREATE DATABASE db1 +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: result.col1 SIMPLE [] +POSTHOOK: Lineage: result.col1 SIMPLE [] +PREHOOK: query: CREATE TABLE db1.result(col1 STRING) +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE db1.result(col1 STRING) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: db1@result +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: result.col1 SIMPLE [] +POSTHOOK: Lineage: result.col1 SIMPLE [] +PREHOOK: query: INSERT OVERWRITE TABLE db1.result SELECT 'db1_insert1' FROM src LIMIT 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: db1@result +POSTHOOK: query: INSERT OVERWRITE TABLE db1.result SELECT 'db1_insert1' FROM src LIMIT 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: db1@result +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: result.col1 SIMPLE [] +POSTHOOK: Lineage: result.col1 SIMPLE [] +POSTHOOK: Lineage: result.col1 SIMPLE [] +PREHOOK: query: INSERT INTO TABLE db1.result SELECT 'db1_insert2' FROM src LIMIT 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: db1@result +POSTHOOK: query: INSERT INTO TABLE db1.result SELECT 'db1_insert2' FROM src LIMIT 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: db1@result +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: result.col1 SIMPLE [] +POSTHOOK: Lineage: result.col1 SIMPLE [] +POSTHOOK: Lineage: result.col1 SIMPLE [] +POSTHOOK: Lineage: result.col1 SIMPLE [] +PREHOOK: query: SELECT * FROM db1.result order by col1 +PREHOOK: type: QUERY +PREHOOK: Input: db1@result +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM db1.result order by col1 +POSTHOOK: type: QUERY +POSTHOOK: Input: db1@result +#### A masked pattern was here #### +POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: result.col1 SIMPLE [] +POSTHOOK: Lineage: result.col1 SIMPLE [] +POSTHOOK: Lineage: result.col1 SIMPLE [] +POSTHOOK: Lineage: result.col1 SIMPLE [] +db1_insert1 +db1_insert2 Index: ql/src/test/results/clientpositive/tez/insert_into1.q.out =================================================================== --- 
ql/src/test/results/clientpositive/tez/insert_into1.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/insert_into1.q.out (working copy) @@ -0,0 +1,486 @@ +PREHOOK: query: DROP TABLE insert_into1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE insert_into1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE insert_into1 (key int, value string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE insert_into1 (key int, value string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@insert_into1 +PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * from src LIMIT 100 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * from src LIMIT 100 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME insert_into1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 100))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Limit + Reduce Output Operator + sort order: + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Reduce Operator Tree: + Extract + Limit + Select Operator + expressions: + expr: UDFToInteger(_col0) + type: int + expr: _col1 + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: INSERT 
INTO TABLE insert_into1 SELECT * from src LIMIT 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: INSERT INTO TABLE insert_into1 SELECT * from src LIMIT 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@insert_into1 +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT SUM(HASH(c)) FROM ( + SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1 +) t +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT SUM(HASH(c)) FROM ( + SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1 +) t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +10226524244 +PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100 +POSTHOOK: type: QUERY +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME insert_into1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 100))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Limit + Reduce Output Operator + sort order: + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Reduce Operator Tree: + Extract + Limit + Select Operator + expressions: + expr: UDFToInteger(_col0) + type: int + expr: _col1 + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> 
Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@insert_into1 +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT SUM(HASH(c)) FROM ( + SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1 +) t +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT SUM(HASH(c)) FROM ( + SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1 +) t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +20453048488 +PREHOOK: query: SELECT COUNT(*) FROM insert_into1 +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT COUNT(*) FROM insert_into1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +200 +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src LIMIT 10 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src LIMIT 10 +POSTHOOK: type: QUERY +POSTHOOK: Lineage: insert_into1.key 
EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME insert_into1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 10))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Limit + Reduce Output Operator + sort order: + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Reduce Operator Tree: + Extract + Limit + Select Operator + expressions: + expr: UDFToInteger(_col0) + type: int + expr: _col1 + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src LIMIT 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src LIMIT 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@insert_into1 +POSTHOOK: 
Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT SUM(HASH(c)) FROM ( + SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1 +) t +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT SUM(HASH(c)) FROM ( + SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1 +) t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +-826625916 +PREHOOK: query: DROP TABLE insert_into1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@insert_into1 +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: DROP TABLE insert_into1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@insert_into1 +POSTHOOK: Output: default@insert_into1 +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Index: ql/src/test/results/clientpositive/tez/insert_into2.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/insert_into2.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/insert_into2.q.out (working copy) @@ -0,0 +1,544 @@ +PREHOOK: query: DROP TABLE insert_into2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE insert_into2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE insert_into2 (key int, value string) + PARTITIONED BY (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE insert_into2 (key int, value string) + PARTITIONED BY (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@insert_into2 +PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into2 PARTITION (ds='1') + 
SELECT * FROM src LIMIT 100 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into2 PARTITION (ds='1') + SELECT * FROM src LIMIT 100 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME insert_into2) (TOK_PARTSPEC (TOK_PARTVAL ds '1')))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 100))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Limit + Reduce Output Operator + sort order: + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Reduce Operator Tree: + Extract + Limit + Select Operator + expressions: + expr: UDFToInteger(_col0) + type: int + expr: _col1 + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into2 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 1 + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into2 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into2 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into2 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@insert_into2@ds=1 +POSTHOOK: query: INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@insert_into2@ds=1 +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: 
insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@insert_into2@ds=1 +POSTHOOK: query: INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@insert_into2@ds=1 +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='1' +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into2 +PREHOOK: Input: default@insert_into2@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into2 +POSTHOOK: Input: default@insert_into2@ds=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +200 +PREHOOK: query: SELECT SUM(HASH(c)) FROM ( + SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2 +) t +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into2 +PREHOOK: Input: default@insert_into2@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT SUM(HASH(c)) FROM ( + SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2 +) t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into2 +POSTHOOK: Input: default@insert_into2@ds=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +-24159954504 +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2') + SELECT * FROM src LIMIT 100 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2') + SELECT * FROM src LIMIT 100 +POSTHOOK: type: QUERY +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 
PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME insert_into2) (TOK_PARTSPEC (TOK_PARTVAL ds '2')))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 100))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Limit + Reduce Output Operator + sort order: + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Reduce Operator Tree: + Extract + Limit + Select Operator + expressions: + expr: UDFToInteger(_col0) + type: int + expr: _col1 + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into2 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 2 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into2 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into2 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into2 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2') + SELECT * FROM src LIMIT 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@insert_into2@ds=2 +POSTHOOK: query: INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2') + SELECT * FROM src LIMIT 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@insert_into2@ds=2 +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, 
type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT SUM(HASH(c)) FROM ( + SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2 +) t +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into2 +PREHOOK: Input: default@insert_into2@ds=1 +PREHOOK: Input: default@insert_into2@ds=2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT SUM(HASH(c)) FROM ( + SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2 +) t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into2 +POSTHOOK: Input: default@insert_into2@ds=1 +POSTHOOK: Input: default@insert_into2@ds=2 +#### A masked pattern was here #### +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +-36239931656 +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2') + SELECT * FROM src LIMIT 50 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2') + SELECT * FROM src LIMIT 50 +POSTHOOK: type: QUERY +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME insert_into2) (TOK_PARTSPEC (TOK_PARTVAL ds '2')))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 50))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on 
stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Limit + Reduce Output Operator + sort order: + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Reduce Operator Tree: + Extract + Limit + Select Operator + expressions: + expr: UDFToInteger(_col0) + type: int + expr: _col1 + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into2 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 2 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into2 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into2 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into2 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2') + SELECT * FROM src LIMIT 50 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@insert_into2@ds=2 +POSTHOOK: query: INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2') + SELECT * FROM src LIMIT 50 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@insert_into2@ds=2 +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] 
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT SUM(HASH(c)) FROM ( + SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2 +) t +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into2 +PREHOOK: Input: default@insert_into2@ds=1 +PREHOOK: Input: default@insert_into2@ds=2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT SUM(HASH(c)) FROM ( + SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2 +) t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into2 +POSTHOOK: Input: default@insert_into2@ds=1 +POSTHOOK: Input: default@insert_into2@ds=2 +#### A masked pattern was here #### +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +-27100860056 +PREHOOK: query: DROP TABLE insert_into2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@insert_into2 +PREHOOK: Output: default@insert_into2 +POSTHOOK: query: DROP TABLE insert_into2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@insert_into2 +POSTHOOK: Output: default@insert_into2 +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Index: ql/src/test/results/clientpositive/tez/join0.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/join0.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/join0.q.out (working copy) @@ -0,0 +1,262 @@ +PREHOOK: query: EXPLAIN +SELECT src1.key as k1, 
src1.value as v1, + src2.key as k2, src2.value as v2 FROM + (SELECT * FROM src WHERE src.key < 10) src1 + JOIN + (SELECT * FROM src WHERE src.key < 10) src2 + SORT BY k1, v1, k2, v2 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT src1.key as k1, src1.value as v1, + src2.key as k2, src2.value as v2 FROM + (SELECT * FROM src WHERE src.key < 10) src1 + JOIN + (SELECT * FROM src WHERE src.key < 10) src2 + SORT BY k1, v1, k2, v2 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 10)))) src1) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 10)))) src2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) k1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) v1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) k2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value) v2)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k1)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v1)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k2)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v2))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Filter Operator + predicate: + expr: (key < 10) + type: boolean + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Reduce Output Operator + sort order: + tag: 1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Alias -> Map Operator Tree: + src + TableScan + alias: src + Filter Operator + predicate: + expr: (key < 10) + type: boolean + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Reduce Output Operator + sort order: + tag: 0 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {VALUE._col0} {VALUE._col1} + 1 {VALUE._col0} {VALUE._col1} + handleSkewJoin: false + outputColumnNames: _col0, _col1, _col2, _col3 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: string + expr: _col2 + type: string + expr: _col3 + type: string + outputColumnNames: _col0, _col1, _col2, _col3 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + expr: _col1 + type: string + expr: _col2 + type: string + expr: _col3 + type: string + sort order: ++++ + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + expr: _col2 + type: string + expr: _col3 + type: string + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: EXPLAIN FORMATTED +SELECT src1.key as k1, src1.value as v1, + src2.key as k2, src2.value as v2 FROM + (SELECT * FROM src WHERE src.key < 10) 
src1 + JOIN + (SELECT * FROM src WHERE src.key < 10) src2 + SORT BY k1, v1, k2, v2 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN FORMATTED +SELECT src1.key as k1, src1.value as v1, + src2.key as k2, src2.value as v2 FROM + (SELECT * FROM src WHERE src.key < 10) src1 + JOIN + (SELECT * FROM src WHERE src.key < 10) src2 + SORT BY k1, v1, k2, v2 +POSTHOOK: type: QUERY +{"STAGE PLANS":{"Stage-1":{"Tez":{"Vectorized execution:":"false","Tez Work:":[{"Vectorized execution:":"false","Split Sample:":{},"Alias -> Map Operator Tree:":{"src":{"TS_0":{"FIL_13":{"SEL_2":{"RS_7":{"Reduce Output Operator":{"Vectorized execution:":"false","Map-reduce partition columns:":[],"sort order:":"","tag:":"1","value expressions:":[{"type:":"string","expr:":"_col0"},{"type:":"string","expr:":"_col1"}],"key expressions:":[]}}}}}}}},{"Vectorized execution:":"false","Split Sample:":{},"Alias -> Map Operator Tree:":{"src":{"TS_3":{"FIL_14":{"SEL_5":{"RS_6":{"Reduce Output Operator":{"Vectorized execution:":"false","Map-reduce partition columns:":[],"sort order:":"","tag:":"0","value expressions:":[{"type:":"string","expr:":"_col0"},{"type:":"string","expr:":"_col1"}],"key expressions:":[]}}}}}}}},{"Vectorized execution:":"false","Reduce Operator Tree:":{"JOIN_8":{"SEL_9":{"RS_10":{"Reduce Output Operator":{"Vectorized execution:":"false","Map-reduce partition columns:":[],"sort order:":"++++","tag:":"-1","value expressions:":[{"type:":"string","expr:":"_col0"},{"type:":"string","expr:":"_col1"},{"type:":"string","expr:":"_col2"},{"type:":"string","expr:":"_col3"}],"key expressions:":[{"type:":"string","expr:":"_col0"},{"type:":"string","expr:":"_col1"},{"type:":"string","expr:":"_col2"},{"type:":"string","expr:":"_col3"}]}}}}}},{"Vectorized execution:":"false","Reduce Operator Tree:":{"EX_11":{"FS_12":{"File Output Operator":{"Vectorized execution:":"false","GlobalTableId:":"0","compressed:":"false","table:":{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}}}}}}]}},"Stage-0":{"Fetch Operator":{"limit:":"-1"}}},"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-0":{"ROOT STAGE":"TRUE"}},"ABSTRACT SYNTAX TREE":"(TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 10)))) src1) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 10)))) src2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) k1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) v1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) k2) (TOK_SELEXPR (. 
(TOK_TABLE_OR_COL src2) value) v2)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k1)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v1)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k2)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v2)))))"} +PREHOOK: query: SELECT src1.key as k1, src1.value as v1, + src2.key as k2, src2.value as v2 FROM + (SELECT * FROM src WHERE src.key < 10) src1 + JOIN + (SELECT * FROM src WHERE src.key < 10) src2 + SORT BY k1, v1, k2, v2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT src1.key as k1, src1.value as v1, + src2.key as k2, src2.value as v2 FROM + (SELECT * FROM src WHERE src.key < 10) src1 + JOIN + (SELECT * FROM src WHERE src.key < 10) src2 + SORT BY k1, v1, k2, v2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 val_0 0 val_0 +0 val_0 0 val_0 +0 val_0 0 val_0 +0 val_0 0 val_0 +0 val_0 0 val_0 +0 val_0 0 val_0 +0 val_0 0 val_0 +0 val_0 0 val_0 +0 val_0 0 val_0 +0 val_0 2 val_2 +0 val_0 2 val_2 +0 val_0 2 val_2 +0 val_0 4 val_4 +0 val_0 4 val_4 +0 val_0 4 val_4 +0 val_0 5 val_5 +0 val_0 5 val_5 +0 val_0 5 val_5 +0 val_0 5 val_5 +0 val_0 5 val_5 +0 val_0 5 val_5 +0 val_0 5 val_5 +0 val_0 5 val_5 +0 val_0 5 val_5 +0 val_0 8 val_8 +0 val_0 8 val_8 +0 val_0 8 val_8 +0 val_0 9 val_9 +0 val_0 9 val_9 +0 val_0 9 val_9 +2 val_2 0 val_0 +2 val_2 0 val_0 +2 val_2 0 val_0 +2 val_2 2 val_2 +2 val_2 4 val_4 +2 val_2 5 val_5 +2 val_2 5 val_5 +2 val_2 5 val_5 +2 val_2 8 val_8 +2 val_2 9 val_9 +4 val_4 0 val_0 +4 val_4 0 val_0 +4 val_4 0 val_0 +4 val_4 2 val_2 +4 val_4 4 val_4 +4 val_4 5 val_5 +4 val_4 5 val_5 +4 val_4 5 val_5 +4 val_4 8 val_8 +4 val_4 9 val_9 +5 val_5 0 val_0 +5 val_5 0 val_0 +5 val_5 0 val_0 +5 val_5 0 val_0 +5 val_5 0 val_0 +5 val_5 0 val_0 +5 val_5 0 val_0 +5 val_5 0 val_0 +5 val_5 0 val_0 +5 val_5 2 val_2 +5 val_5 2 val_2 +5 val_5 2 val_2 +5 val_5 4 val_4 +5 val_5 4 val_4 +5 val_5 4 val_4 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 8 val_8 +5 val_5 8 val_8 +5 val_5 8 val_8 +5 val_5 9 val_9 +5 val_5 9 val_9 +5 val_5 9 val_9 +8 val_8 0 val_0 +8 val_8 0 val_0 +8 val_8 0 val_0 +8 val_8 2 val_2 +8 val_8 4 val_4 +8 val_8 5 val_5 +8 val_8 5 val_5 +8 val_8 5 val_5 +8 val_8 8 val_8 +8 val_8 9 val_9 +9 val_9 0 val_0 +9 val_9 0 val_0 +9 val_9 0 val_0 +9 val_9 2 val_2 +9 val_9 4 val_4 +9 val_9 5 val_5 +9 val_9 5 val_5 +9 val_9 5 val_5 +9 val_9 8 val_8 +9 val_9 9 val_9 Index: ql/src/test/results/clientpositive/tez/join1.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/join1.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/join1.q.out (working copy) @@ -0,0 +1,1196 @@ +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@dest_j1 +PREHOOK: query: EXPLAIN +FROM src src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +FROM src src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (= (. 
(TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_j1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src2 + TableScan + alias: src2 + Reduce Output Operator + key expressions: + expr: key + type: string + sort order: + + Map-reduce partition columns: + expr: key + type: string + tag: 1 + value expressions: + expr: value + type: string + Alias -> Map Operator Tree: + src1 + TableScan + alias: src1 + Reduce Output Operator + key expressions: + expr: key + type: string + sort order: + + Map-reduce partition columns: + expr: key + type: string + tag: 0 + value expressions: + expr: key + type: string + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {VALUE._col0} + 1 {VALUE._col1} + handleSkewJoin: false + outputColumnNames: _col0, _col5 + Select Operator + expressions: + expr: UDFToInteger(_col0) + type: int + expr: _col5 + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest_j1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest_j1 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest_j1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest_j1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest_j1 +POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +POSTHOOK: type: QUERY +POSTHOOK: Input: 
default@src +POSTHOOK: Output: default@dest_j1 +POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest_j1.* FROM dest_j1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest_j1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT dest_j1.* FROM dest_j1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest_j1 +#### A masked pattern was here #### +POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +0 val_0 +0 val_0 +0 val_0 +0 val_0 +0 val_0 +0 val_0 +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +104 val_104 +104 val_104 +105 val_105 +11 val_11 +111 val_111 +113 val_113 +113 val_113 +113 val_113 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +118 val_118 +118 val_118 +118 val_118 +119 val_119 +119 val_119 +119 val_119 +119 val_119 +119 val_119 +119 val_119 +119 val_119 +119 val_119 +119 val_119 +12 val_12 +12 val_12 +12 val_12 +12 val_12 +120 val_120 +120 val_120 +120 val_120 +120 val_120 +125 val_125 +125 val_125 +125 val_125 +125 val_125 +126 val_126 +128 val_128 +128 val_128 +128 val_128 +128 val_128 +128 val_128 +128 val_128 +128 val_128 +128 val_128 +128 val_128 +129 val_129 +129 val_129 +129 val_129 +129 val_129 +131 val_131 +133 val_133 +134 val_134 +134 val_134 +134 val_134 +134 val_134 +136 val_136 +137 val_137 +137 val_137 +137 val_137 +137 val_137 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +143 val_143 +145 val_145 +146 val_146 +146 val_146 +146 val_146 +146 val_146 +149 val_149 +149 val_149 +149 val_149 +149 val_149 +15 val_15 +15 val_15 +15 val_15 +15 val_15 +150 val_150 +152 val_152 +152 val_152 +152 val_152 +152 val_152 +153 val_153 +155 val_155 +156 val_156 +157 val_157 +158 val_158 +160 val_160 +162 val_162 +163 val_163 +164 val_164 +164 val_164 +164 val_164 +164 val_164 +165 val_165 +165 val_165 +165 val_165 +165 val_165 +166 val_166 +167 val_167 +167 val_167 +167 val_167 +167 val_167 +167 val_167 +167 val_167 +167 val_167 +167 val_167 +167 val_167 +168 val_168 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +17 val_17 +170 val_170 +172 val_172 +172 val_172 +172 val_172 +172 val_172 +174 val_174 +174 val_174 +174 val_174 +174 val_174 +175 val_175 +175 val_175 +175 val_175 +175 val_175 +176 val_176 +176 val_176 +176 val_176 +176 val_176 +177 val_177 +178 val_178 +179 val_179 +179 val_179 +179 val_179 +179 val_179 +18 val_18 +18 val_18 +18 val_18 +18 val_18 +180 val_180 +181 val_181 +183 val_183 +186 val_186 +187 val_187 +187 val_187 +187 val_187 +187 val_187 +187 val_187 +187 val_187 +187 val_187 +187 val_187 +187 val_187 +189 val_189 +19 val_19 +190 val_190 +191 val_191 +191 val_191 +191 val_191 +191 val_191 +192 val_192 +193 val_193 +193 val_193 +193 val_193 +193 val_193 +193 val_193 +193 val_193 +193 val_193 +193 val_193 +193 val_193 +194 val_194 +195 val_195 +195 
val_195 +195 val_195 +195 val_195 +196 val_196 +197 val_197 +197 val_197 +197 val_197 +197 val_197 +199 val_199 +199 val_199 +199 val_199 +199 val_199 +199 val_199 +199 val_199 +199 val_199 +199 val_199 +199 val_199 +2 val_2 +20 val_20 +200 val_200 +200 val_200 +200 val_200 +200 val_200 +201 val_201 +202 val_202 +203 val_203 +203 val_203 +203 val_203 +203 val_203 +205 val_205 +205 val_205 +205 val_205 +205 val_205 +207 val_207 +207 val_207 +207 val_207 +207 val_207 +208 val_208 +208 val_208 +208 val_208 +208 val_208 +208 val_208 +208 val_208 +208 val_208 +208 val_208 +208 val_208 +209 val_209 +209 val_209 +209 val_209 +209 val_209 +213 val_213 +213 val_213 +213 val_213 +213 val_213 +214 val_214 +216 val_216 +216 val_216 +216 val_216 +216 val_216 +217 val_217 +217 val_217 +217 val_217 +217 val_217 +218 val_218 +219 val_219 +219 val_219 +219 val_219 +219 val_219 +221 val_221 +221 val_221 +221 val_221 +221 val_221 +222 val_222 +223 val_223 +223 val_223 +223 val_223 +223 val_223 +224 val_224 +224 val_224 +224 val_224 +224 val_224 +226 val_226 +228 val_228 +229 val_229 +229 val_229 +229 val_229 +229 val_229 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +233 val_233 +233 val_233 +233 val_233 +233 val_233 +235 val_235 +237 val_237 +237 val_237 +237 val_237 +237 val_237 +238 val_238 +238 val_238 +238 val_238 +238 val_238 +239 val_239 +239 val_239 +239 val_239 +239 val_239 +24 val_24 +24 val_24 +24 val_24 +24 val_24 +241 val_241 +242 val_242 +242 val_242 +242 val_242 +242 val_242 +244 val_244 +247 val_247 +248 val_248 +249 val_249 +252 val_252 +255 val_255 +255 val_255 +255 val_255 +255 val_255 +256 val_256 +256 val_256 +256 val_256 +256 val_256 +257 val_257 +258 val_258 +26 val_26 +26 val_26 +26 val_26 +26 val_26 +260 val_260 +262 val_262 +263 val_263 +265 val_265 +265 val_265 +265 val_265 +265 val_265 +266 val_266 +27 val_27 +272 val_272 +272 val_272 +272 val_272 +272 val_272 +273 val_273 +273 val_273 +273 val_273 +273 val_273 +273 val_273 +273 val_273 +273 val_273 +273 val_273 +273 val_273 +274 val_274 +275 val_275 +277 val_277 +277 val_277 +277 val_277 +277 val_277 +277 val_277 +277 val_277 +277 val_277 +277 val_277 +277 val_277 +277 val_277 +277 val_277 +277 val_277 +277 val_277 +277 val_277 +277 val_277 +277 val_277 +278 val_278 +278 val_278 +278 val_278 +278 val_278 +28 val_28 +280 val_280 +280 val_280 +280 val_280 +280 val_280 +281 val_281 +281 val_281 +281 val_281 +281 val_281 +282 val_282 +282 val_282 +282 val_282 +282 val_282 +283 val_283 +284 val_284 +285 val_285 +286 val_286 +287 val_287 +288 val_288 +288 val_288 +288 val_288 +288 val_288 +289 val_289 +291 val_291 +292 val_292 +296 val_296 +298 val_298 +298 val_298 +298 val_298 +298 val_298 +298 val_298 +298 val_298 +298 val_298 +298 val_298 +298 val_298 +30 val_30 +302 val_302 +305 val_305 +306 val_306 +307 val_307 +307 val_307 +307 val_307 +307 val_307 +308 val_308 +309 val_309 +309 val_309 +309 val_309 +309 val_309 +310 val_310 +311 val_311 +311 val_311 +311 val_311 +311 val_311 +311 val_311 +311 val_311 +311 val_311 +311 val_311 +311 val_311 +315 val_315 +316 val_316 +316 val_316 +316 val_316 +316 val_316 +316 val_316 +316 val_316 +316 val_316 +316 val_316 +316 val_316 +317 val_317 +317 val_317 +317 val_317 +317 val_317 +318 val_318 +318 val_318 +318 val_318 
+318 val_318 +318 val_318 +318 val_318 +318 val_318 +318 val_318 +318 val_318 +321 val_321 +321 val_321 +321 val_321 +321 val_321 +322 val_322 +322 val_322 +322 val_322 +322 val_322 +323 val_323 +325 val_325 +325 val_325 +325 val_325 +325 val_325 +327 val_327 +327 val_327 +327 val_327 +327 val_327 +327 val_327 +327 val_327 +327 val_327 +327 val_327 +327 val_327 +33 val_33 +331 val_331 +331 val_331 +331 val_331 +331 val_331 +332 val_332 +333 val_333 +333 val_333 +333 val_333 +333 val_333 +335 val_335 +336 val_336 +338 val_338 +339 val_339 +34 val_34 +341 val_341 +342 val_342 +342 val_342 +342 val_342 +342 val_342 +344 val_344 +344 val_344 +344 val_344 +344 val_344 +345 val_345 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +35 val_35 +35 val_35 +35 val_35 +35 val_35 +35 val_35 +35 val_35 +35 val_35 +35 val_35 +35 val_35 +351 val_351 +353 val_353 +353 val_353 +353 val_353 +353 val_353 +356 val_356 +360 val_360 +362 val_362 +364 val_364 +365 val_365 +366 val_366 +367 val_367 +367 val_367 +367 val_367 +367 val_367 +368 val_368 +369 val_369 +369 val_369 +369 val_369 +369 val_369 +369 val_369 +369 val_369 +369 val_369 +369 val_369 +369 val_369 +37 val_37 +37 val_37 +37 val_37 +37 val_37 +373 val_373 +374 val_374 +375 val_375 +377 val_377 +378 val_378 +379 val_379 +382 val_382 +382 val_382 +382 val_382 +382 val_382 +384 val_384 +384 val_384 +384 val_384 +384 val_384 +384 val_384 +384 val_384 +384 val_384 +384 val_384 +384 val_384 +386 val_386 +389 val_389 +392 val_392 +393 val_393 +394 val_394 +395 val_395 +395 val_395 +395 val_395 +395 val_395 +396 val_396 +396 val_396 +396 val_396 +396 val_396 +396 val_396 +396 val_396 +396 val_396 +396 val_396 +396 val_396 +397 val_397 +397 val_397 +397 val_397 +397 val_397 +399 val_399 +399 val_399 +399 val_399 +399 val_399 +4 val_4 +400 val_400 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +402 val_402 +403 val_403 +403 val_403 +403 val_403 +403 val_403 +403 val_403 +403 val_403 +403 val_403 +403 val_403 +403 val_403 +404 val_404 +404 val_404 +404 val_404 +404 val_404 +406 val_406 +406 val_406 +406 val_406 +406 val_406 +406 val_406 +406 val_406 +406 val_406 +406 val_406 +406 val_406 +406 val_406 +406 val_406 +406 val_406 +406 val_406 +406 val_406 +406 val_406 +406 val_406 +407 val_407 +409 val_409 +409 val_409 +409 val_409 +409 val_409 +409 val_409 +409 val_409 +409 val_409 +409 val_409 +409 val_409 +41 val_41 +411 val_411 +413 val_413 +413 val_413 +413 val_413 +413 val_413 +414 val_414 +414 val_414 +414 val_414 +414 val_414 +417 val_417 +417 val_417 +417 val_417 +417 val_417 +417 val_417 +417 val_417 +417 val_417 +417 val_417 +417 val_417 +418 val_418 +419 val_419 +42 val_42 +42 val_42 +42 val_42 +42 val_42 +421 val_421 +424 val_424 +424 val_424 +424 val_424 +424 val_424 +427 val_427 +429 val_429 +429 val_429 +429 val_429 +429 val_429 +43 val_43 +430 val_430 +430 val_430 +430 val_430 +430 val_430 +430 val_430 +430 val_430 +430 val_430 +430 val_430 +430 val_430 +431 val_431 +431 val_431 +431 val_431 +431 val_431 
+431 val_431 +431 val_431 +431 val_431 +431 val_431 +431 val_431 +432 val_432 +435 val_435 +436 val_436 +437 val_437 +438 val_438 +438 val_438 +438 val_438 +438 val_438 +438 val_438 +438 val_438 +438 val_438 +438 val_438 +438 val_438 +439 val_439 +439 val_439 +439 val_439 +439 val_439 +44 val_44 +443 val_443 +444 val_444 +446 val_446 +448 val_448 +449 val_449 +452 val_452 +453 val_453 +454 val_454 +454 val_454 +454 val_454 +454 val_454 +454 val_454 +454 val_454 +454 val_454 +454 val_454 +454 val_454 +455 val_455 +457 val_457 +458 val_458 +458 val_458 +458 val_458 +458 val_458 +459 val_459 +459 val_459 +459 val_459 +459 val_459 +460 val_460 +462 val_462 +462 val_462 +462 val_462 +462 val_462 +463 val_463 +463 val_463 +463 val_463 +463 val_463 +466 val_466 +466 val_466 +466 val_466 +466 val_466 +466 val_466 +466 val_466 +466 val_466 +466 val_466 +466 val_466 +467 val_467 +468 val_468 +468 val_468 +468 val_468 +468 val_468 +468 val_468 +468 val_468 +468 val_468 +468 val_468 +468 val_468 +468 val_468 +468 val_468 +468 val_468 +468 val_468 +468 val_468 +468 val_468 +468 val_468 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +47 val_47 +470 val_470 +472 val_472 +475 val_475 +477 val_477 +478 val_478 +478 val_478 +478 val_478 +478 val_478 +479 val_479 +480 val_480 +480 val_480 +480 val_480 +480 val_480 +480 val_480 +480 val_480 +480 val_480 +480 val_480 +480 val_480 +481 val_481 +482 val_482 +483 val_483 +484 val_484 +485 val_485 +487 val_487 +489 val_489 +489 val_489 +489 val_489 +489 val_489 +489 val_489 +489 val_489 +489 val_489 +489 val_489 +489 val_489 +489 val_489 +489 val_489 +489 val_489 +489 val_489 +489 val_489 +489 val_489 +489 val_489 +490 val_490 +491 val_491 +492 val_492 +492 val_492 +492 val_492 +492 val_492 +493 val_493 +494 val_494 +495 val_495 +496 val_496 +497 val_497 +498 val_498 +498 val_498 +498 val_498 +498 val_498 +498 val_498 +498 val_498 +498 val_498 +498 val_498 +498 val_498 +5 val_5 +5 val_5 +5 val_5 +5 val_5 +5 val_5 +5 val_5 +5 val_5 +5 val_5 +5 val_5 +51 val_51 +51 val_51 +51 val_51 +51 val_51 +53 val_53 +54 val_54 +57 val_57 +58 val_58 +58 val_58 +58 val_58 +58 val_58 +64 val_64 +65 val_65 +66 val_66 +67 val_67 +67 val_67 +67 val_67 +67 val_67 +69 val_69 +70 val_70 +70 val_70 +70 val_70 +70 val_70 +70 val_70 +70 val_70 +70 val_70 +70 val_70 +70 val_70 +72 val_72 +72 val_72 +72 val_72 +72 val_72 +74 val_74 +76 val_76 +76 val_76 +76 val_76 +76 val_76 +77 val_77 +78 val_78 +8 val_8 +80 val_80 +82 val_82 +83 val_83 +83 val_83 +83 val_83 +83 val_83 +84 val_84 +84 val_84 +84 val_84 +84 val_84 +85 val_85 +86 val_86 +87 val_87 +9 val_9 +90 val_90 +90 val_90 +90 val_90 +90 val_90 +90 val_90 +90 val_90 +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +98 val_98 +98 val_98 Index: ql/src/test/results/clientpositive/tez/leftsemijoin.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/leftsemijoin.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/leftsemijoin.q.out (working copy) @@ -0,0 +1,98 @@ +PREHOOK: query: drop table sales +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table sales +POSTHOOK: type: DROPTABLE 
+PREHOOK: query: drop table things +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table things +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE sales (name STRING, id INT) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE sales (name STRING, id INT) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@sales +PREHOOK: query: CREATE TABLE things (id INT, name STRING) partitioned by (ds string) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE things (id INT, name STRING) partitioned by (ds string) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@things +PREHOOK: query: load data local inpath '../../data/files/sales.txt' INTO TABLE sales +PREHOOK: type: LOAD +PREHOOK: Output: default@sales +POSTHOOK: query: load data local inpath '../../data/files/sales.txt' INTO TABLE sales +POSTHOOK: type: LOAD +POSTHOOK: Output: default@sales +PREHOOK: query: load data local inpath '../../data/files/things.txt' INTO TABLE things partition(ds='2011-10-23') +PREHOOK: type: LOAD +PREHOOK: Output: default@things +POSTHOOK: query: load data local inpath '../../data/files/things.txt' INTO TABLE things partition(ds='2011-10-23') +POSTHOOK: type: LOAD +POSTHOOK: Output: default@things +POSTHOOK: Output: default@things@ds=2011-10-23 +PREHOOK: query: load data local inpath '../../data/files/things2.txt' INTO TABLE things partition(ds='2011-10-24') +PREHOOK: type: LOAD +PREHOOK: Output: default@things +POSTHOOK: query: load data local inpath '../../data/files/things2.txt' INTO TABLE things partition(ds='2011-10-24') +POSTHOOK: type: LOAD +POSTHOOK: Output: default@things +POSTHOOK: Output: default@things@ds=2011-10-24 +PREHOOK: query: SELECT name,id FROM sales ORDER BY name ASC, id ASC +PREHOOK: type: QUERY +PREHOOK: Input: default@sales +#### A masked pattern was here #### +POSTHOOK: query: SELECT name,id FROM sales ORDER BY name ASC, id ASC +POSTHOOK: type: QUERY +POSTHOOK: Input: default@sales +#### A masked pattern was here #### +Hank 2 +Joe 2 +PREHOOK: query: SELECT id,name FROM things ORDER BY id ASC, name ASC +PREHOOK: type: QUERY +PREHOOK: Input: default@things +PREHOOK: Input: default@things@ds=2011-10-23 +PREHOOK: Input: default@things@ds=2011-10-24 +#### A masked pattern was here #### +POSTHOOK: query: SELECT id,name FROM things ORDER BY id ASC, name ASC +POSTHOOK: type: QUERY +POSTHOOK: Input: default@things +POSTHOOK: Input: default@things@ds=2011-10-23 +POSTHOOK: Input: default@things@ds=2011-10-24 +#### A masked pattern was here #### +2 Tie +2 Tie +PREHOOK: query: SELECT name,id FROM sales LEFT SEMI JOIN things ON (sales.id = things.id) ORDER BY name ASC, id ASC +PREHOOK: type: QUERY +PREHOOK: Input: default@sales +PREHOOK: Input: default@things +PREHOOK: Input: default@things@ds=2011-10-23 +PREHOOK: Input: default@things@ds=2011-10-24 +#### A masked pattern was here #### +POSTHOOK: query: SELECT name,id FROM sales LEFT SEMI JOIN things ON (sales.id = things.id) ORDER BY name ASC, id ASC +POSTHOOK: type: QUERY +POSTHOOK: Input: default@sales +POSTHOOK: Input: default@things +POSTHOOK: Input: default@things@ds=2011-10-23 +POSTHOOK: Input: default@things@ds=2011-10-24 +#### A masked pattern was here #### +Hank 2 +Joe 2 +PREHOOK: query: drop table sales +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@sales +PREHOOK: Output: default@sales +POSTHOOK: query: drop table sales 
+POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@sales +POSTHOOK: Output: default@sales +PREHOOK: query: drop table things +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@things +PREHOOK: Output: default@things +POSTHOOK: query: drop table things +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@things +POSTHOOK: Output: default@things Index: ql/src/test/results/clientpositive/tez/limit_pushdown.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/limit_pushdown.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/limit_pushdown.q.out (working copy) @@ -0,0 +1,1584 @@ +PREHOOK: query: -- HIVE-3562 Some limit can be pushed down to map stage + +explain +select key,value from src order by key limit 20 +PREHOOK: type: QUERY +POSTHOOK: query: -- HIVE-3562 Some limit can be pushed down to map stage + +explain +select key,value from src order by key limit 20 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key))) (TOK_LIMIT 20))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + tag: -1 + TopN: 20 + TopN Hash Memory Usage: 0.3 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Reduce Operator Tree: + Extract + Limit + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 20 + +PREHOOK: query: select key,value from src order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select key,value from src order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +105 val_105 +11 val_11 +111 val_111 +113 val_113 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +118 val_118 +119 val_119 +PREHOOK: query: explain +select key,value from src order by key desc limit 20 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select key,value from src order by key desc limit 20 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEDESC (TOK_TABLE_OR_COL key))) (TOK_LIMIT 20))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + 
type: string + sort order: - + tag: -1 + TopN: 20 + TopN Hash Memory Usage: 0.3 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Reduce Operator Tree: + Extract + Limit + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 20 + +PREHOOK: query: select key,value from src order by key desc limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select key,value from src order by key desc limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +98 val_98 +98 val_98 +97 val_97 +97 val_97 +96 val_96 +95 val_95 +95 val_95 +92 val_92 +90 val_90 +90 val_90 +90 val_90 +9 val_9 +87 val_87 +86 val_86 +85 val_85 +84 val_84 +84 val_84 +83 val_83 +83 val_83 +82 val_82 +PREHOOK: query: explain +select value, sum(key + 1) as sum from src group by value limit 20 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select value, sum(key + 1) as sum from src group by value limit 20 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL value)) (TOK_SELEXPR (TOK_FUNCTION sum (+ (TOK_TABLE_OR_COL key) 1)) sum)) (TOK_GROUPBY (TOK_TABLE_OR_COL value)) (TOK_LIMIT 20))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: value + type: string + expr: key + type: string + outputColumnNames: value, key + Group By Operator + aggregations: + expr: sum((key + 1)) + bucketGroup: false + keys: + expr: value + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + TopN: 20 + TopN Hash Memory Usage: 0.3 + value expressions: + expr: _col1 + type: double + Reduce Operator Tree: + Group By Operator + aggregations: + expr: sum(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: double + outputColumnNames: _col0, _col1 + Limit + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 20 + +PREHOOK: query: select value, sum(key + 1) as sum from src group by value limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select value, sum(key + 1) as sum from src group by value limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +val_0 3.0 +val_10 11.0 +val_100 202.0 +val_103 208.0 +val_104 210.0 +val_105 106.0 +val_11 12.0 +val_111 112.0 +val_113 228.0 +val_114 115.0 +val_116 117.0 +val_118 238.0 +val_119 360.0 +val_12 26.0 +val_120 242.0 +val_125 252.0 +val_126 127.0 +val_128 
387.0 +val_129 260.0 +val_131 132.0 +PREHOOK: query: -- deduped RS +explain +select value,avg(key + 1) from src group by value order by value limit 20 +PREHOOK: type: QUERY +POSTHOOK: query: -- deduped RS +explain +select value,avg(key + 1) from src group by value order by value limit 20 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL value)) (TOK_SELEXPR (TOK_FUNCTION avg (+ (TOK_TABLE_OR_COL key) 1)))) (TOK_GROUPBY (TOK_TABLE_OR_COL value)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 20))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: value + type: string + expr: key + type: string + outputColumnNames: value, key + Group By Operator + aggregations: + expr: avg((key + 1)) + bucketGroup: false + keys: + expr: value + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + TopN: 20 + TopN Hash Memory Usage: 0.3 + value expressions: + expr: _col1 + type: struct + Reduce Operator Tree: + Group By Operator + aggregations: + expr: avg(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: double + outputColumnNames: _col0, _col1 + Limit + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 20 + +PREHOOK: query: select value,avg(key + 1) from src group by value order by value limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select value,avg(key + 1) from src group by value order by value limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +val_0 1.0 +val_10 11.0 +val_100 101.0 +val_103 104.0 +val_104 105.0 +val_105 106.0 +val_11 12.0 +val_111 112.0 +val_113 114.0 +val_114 115.0 +val_116 117.0 +val_118 119.0 +val_119 120.0 +val_12 13.0 +val_120 121.0 +val_125 126.0 +val_126 127.0 +val_128 129.0 +val_129 130.0 +val_131 132.0 +PREHOOK: query: -- distincts +explain +select distinct(cdouble) from alltypesorc limit 20 +PREHOOK: type: QUERY +POSTHOOK: query: -- distincts +explain +select distinct(cdouble) from alltypesorc limit 20 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME alltypesorc))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECTDI (TOK_SELEXPR (TOK_TABLE_OR_COL cdouble))) (TOK_LIMIT 20))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + alltypesorc + TableScan + alias: alltypesorc + Select Operator + expressions: + expr: cdouble + type: double + outputColumnNames: cdouble + Group By Operator + bucketGroup: false + keys: + expr: cdouble + type: double + mode: hash + outputColumnNames: _col0 + Reduce Output 
Operator + key expressions: + expr: _col0 + type: double + sort order: + + Map-reduce partition columns: + expr: _col0 + type: double + tag: -1 + TopN: 20 + TopN Hash Memory Usage: 0.3 + Reduce Operator Tree: + Group By Operator + bucketGroup: false + keys: + expr: KEY._col0 + type: double + mode: mergepartial + outputColumnNames: _col0 + Select Operator + expressions: + expr: _col0 + type: double + outputColumnNames: _col0 + Limit + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 20 + +PREHOOK: query: select distinct(cdouble) from alltypesorc limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: select distinct(cdouble) from alltypesorc limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +NULL +-16379.0 +-16373.0 +-16372.0 +-16369.0 +-16355.0 +-16339.0 +-16324.0 +-16311.0 +-16310.0 +-16309.0 +-16307.0 +-16306.0 +-16305.0 +-16300.0 +-16296.0 +-16280.0 +-16277.0 +-16274.0 +-16269.0 +PREHOOK: query: explain +select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint limit 20 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint limit 20 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME alltypesorc))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL ctinyint)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL cdouble)))) (TOK_GROUPBY (TOK_TABLE_OR_COL ctinyint)) (TOK_LIMIT 20))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + alltypesorc + TableScan + alias: alltypesorc + Select Operator + expressions: + expr: ctinyint + type: tinyint + expr: cdouble + type: double + outputColumnNames: ctinyint, cdouble + Group By Operator + aggregations: + expr: count(DISTINCT cdouble) + bucketGroup: false + keys: + expr: ctinyint + type: tinyint + expr: cdouble + type: double + mode: hash + outputColumnNames: _col0, _col1, _col2 + Reduce Output Operator + key expressions: + expr: _col0 + type: tinyint + expr: _col1 + type: double + sort order: ++ + Map-reduce partition columns: + expr: _col0 + type: tinyint + tag: -1 + TopN: 20 + TopN Hash Memory Usage: 0.3 + value expressions: + expr: _col2 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(DISTINCT KEY._col1:0._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: tinyint + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: _col0 + type: tinyint + expr: _col1 + type: bigint + outputColumnNames: _col0, _col1 + Limit + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 20 + +PREHOOK: query: select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here 
#### +POSTHOOK: query: select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +NULL 2932 +-64 24 +-63 19 +-62 27 +-61 25 +-60 27 +-59 31 +-58 23 +-57 35 +-56 36 +-55 29 +-54 26 +-53 22 +-52 33 +-51 21 +-50 30 +-49 26 +-48 29 +-47 22 +-46 24 +PREHOOK: query: -- multi distinct +explain +select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint limit 20 +PREHOOK: type: QUERY +POSTHOOK: query: -- multi distinct +explain +select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint limit 20 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME alltypesorc))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL ctinyint)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL cstring1))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL cstring2)))) (TOK_GROUPBY (TOK_TABLE_OR_COL ctinyint)) (TOK_LIMIT 20))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + alltypesorc + TableScan + alias: alltypesorc + Select Operator + expressions: + expr: ctinyint + type: tinyint + expr: cstring1 + type: string + expr: cstring2 + type: string + outputColumnNames: ctinyint, cstring1, cstring2 + Group By Operator + aggregations: + expr: count(DISTINCT cstring1) + expr: count(DISTINCT cstring2) + bucketGroup: false + keys: + expr: ctinyint + type: tinyint + expr: cstring1 + type: string + expr: cstring2 + type: string + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Reduce Output Operator + key expressions: + expr: _col0 + type: tinyint + expr: _col1 + type: string + expr: _col2 + type: string + sort order: +++ + Map-reduce partition columns: + expr: _col0 + type: tinyint + tag: -1 + TopN: 20 + TopN Hash Memory Usage: 0.3 + value expressions: + expr: _col3 + type: bigint + expr: _col4 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(DISTINCT KEY._col1:0._col0) + expr: count(DISTINCT KEY._col1:1._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: tinyint + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Select Operator + expressions: + expr: _col0 + type: tinyint + expr: _col1 + type: bigint + expr: _col2 + type: bigint + outputColumnNames: _col0, _col1, _col2 + Limit + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 20 + +PREHOOK: query: select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +NULL 3065 3 +-64 3 13 +-63 3 16 +-62 3 23 +-61 3 25 +-60 3 25 +-59 3 27 +-58 3 24 +-57 3 23 +-56 3 22 +-55 3 21 +-54 3 21 +-53 3 17 +-52 3 21 +-51 1012 1045 +-50 3 25 +-49 3 24 +-48 3 27 +-47 3 23 +-46 3 19 +PREHOOK: 
query: -- limit zero +explain +select key,value from src order by key limit 0 +PREHOOK: type: QUERY +POSTHOOK: query: -- limit zero +explain +select key,value from src order by key limit 0 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key))) (TOK_LIMIT 0))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Reduce Operator Tree: + Extract + Limit + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 0 + +PREHOOK: query: select key,value from src order by key limit 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select key,value from src order by key limit 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +PREHOOK: query: -- 2MR (applied to last RS) +explain +select value, sum(key) as sum from src group by value order by sum limit 20 +PREHOOK: type: QUERY +POSTHOOK: query: -- 2MR (applied to last RS) +explain +select value, sum(key) as sum from src group by value order by sum limit 20 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL value)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_TABLE_OR_COL key)) sum)) (TOK_GROUPBY (TOK_TABLE_OR_COL value)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL sum))) (TOK_LIMIT 20))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: value + type: string + expr: key + type: string + outputColumnNames: value, key + Group By Operator + aggregations: + expr: sum(key) + bucketGroup: false + keys: + expr: value + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col1 + type: double + Reduce Operator Tree: + Group By Operator + aggregations: + expr: sum(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: double + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col1 + type: double + sort order: + + tag: -1 + TopN: 20 + TopN Hash Memory Usage: 0.3 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: double + Reduce Operator Tree: + 
Extract + Limit + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 20 + +PREHOOK: query: select value, sum(key) as sum from src group by value order by sum limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select value, sum(key) as sum from src group by value order by sum limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +val_0 0.0 +val_2 2.0 +val_4 4.0 +val_8 8.0 +val_9 9.0 +val_10 10.0 +val_11 11.0 +val_5 15.0 +val_17 17.0 +val_19 19.0 +val_20 20.0 +val_12 24.0 +val_27 27.0 +val_28 28.0 +val_30 30.0 +val_15 30.0 +val_33 33.0 +val_34 34.0 +val_18 36.0 +val_41 41.0 +PREHOOK: query: -- subqueries +explain +select * from +(select key, count(1) from src group by key order by key limit 2) subq +join +(select key, count(1) from src group by key limit 3) subq2 +on subq.key=subq2.key limit 4 +PREHOOK: type: QUERY +POSTHOOK: query: -- subqueries +explain +select * from +(select key, count(1) from src group by key order by key limit 2) subq +join +(select key, count(1) from src group by key limit 3) subq2 +on subq.key=subq2.key limit 4 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key))) (TOK_LIMIT 2))) subq) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_LIMIT 3))) subq2) (= (. (TOK_TABLE_OR_COL subq) key) (. 
(TOK_TABLE_OR_COL subq2) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 4))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + outputColumnNames: key + Group By Operator + aggregations: + expr: count(1) + bucketGroup: false + keys: + expr: key + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + TopN: 2 + TopN Hash Memory Usage: 0.3 + value expressions: + expr: _col1 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + outputColumnNames: _col0, _col1 + Limit + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: 0 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + outputColumnNames: key + Group By Operator + aggregations: + expr: count(1) + bucketGroup: false + keys: + expr: key + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + TopN: 3 + TopN Hash Memory Usage: 0.3 + value expressions: + expr: _col1 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + outputColumnNames: _col0, _col1 + Limit + Reduce Output Operator + sort order: + tag: -1 + TopN: 3 + TopN Hash Memory Usage: 0.3 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + Reduce Operator Tree: + Extract + Limit + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: 1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {VALUE._col0} {VALUE._col1} + 1 {VALUE._col0} {VALUE._col1} + handleSkewJoin: false + outputColumnNames: _col0, _col1, _col2, _col3 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + expr: _col2 + type: string + expr: _col3 + type: bigint + outputColumnNames: _col0, _col1, _col2, _col3 + Limit + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 4 + +PREHOOK: query: -- map aggregation disabled +explain +select value, sum(key) 
as sum from src group by value limit 20 +PREHOOK: type: QUERY +POSTHOOK: query: -- map aggregation disabled +explain +select value, sum(key) as sum from src group by value limit 20 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL value)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_TABLE_OR_COL key)) sum)) (TOK_GROUPBY (TOK_TABLE_OR_COL value)) (TOK_LIMIT 20))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: value + type: string + expr: key + type: string + outputColumnNames: value, key + Reduce Output Operator + key expressions: + expr: value + type: string + sort order: + + Map-reduce partition columns: + expr: value + type: string + tag: -1 + TopN: 20 + TopN Hash Memory Usage: 0.3 + value expressions: + expr: key + type: string + Reduce Operator Tree: + Group By Operator + aggregations: + expr: sum(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: complete + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: double + outputColumnNames: _col0, _col1 + Limit + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 20 + +PREHOOK: query: select value, sum(key) as sum from src group by value limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select value, sum(key) as sum from src group by value limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +val_0 0.0 +val_10 10.0 +val_100 200.0 +val_103 206.0 +val_104 208.0 +val_105 105.0 +val_11 11.0 +val_111 111.0 +val_113 226.0 +val_114 114.0 +val_116 116.0 +val_118 236.0 +val_119 357.0 +val_12 24.0 +val_120 240.0 +val_125 250.0 +val_126 126.0 +val_128 384.0 +val_129 258.0 +val_131 131.0 +PREHOOK: query: -- flush for order-by +explain +select key,value,value,value,value,value,value,value,value from src order by key limit 100 +PREHOOK: type: QUERY +POSTHOOK: query: -- flush for order-by +explain +select key,value,value,value,value,value,value,value,value from src order by key limit 100 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value)) (TOK_SELEXPR (TOK_TABLE_OR_COL value)) (TOK_SELEXPR (TOK_TABLE_OR_COL value)) (TOK_SELEXPR (TOK_TABLE_OR_COL value)) (TOK_SELEXPR (TOK_TABLE_OR_COL value)) (TOK_SELEXPR (TOK_TABLE_OR_COL value)) (TOK_SELEXPR (TOK_TABLE_OR_COL value)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key))) (TOK_LIMIT 100))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + expr: value + type: string + expr: value + type: string + expr: 
value + type: string + expr: value + type: string + expr: value + type: string + expr: value + type: string + expr: value + type: string + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + tag: -1 + TopN: 100 + TopN Hash Memory Usage: 2.0E-5 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + expr: _col2 + type: string + expr: _col3 + type: string + expr: _col4 + type: string + expr: _col5 + type: string + expr: _col6 + type: string + expr: _col7 + type: string + expr: _col8 + type: string + Reduce Operator Tree: + Extract + Limit + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 100 + +PREHOOK: query: select key,value,value,value,value,value,value,value,value from src order by key limit 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select key,value,value,value,value,value,value,value,value from src order by key limit 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 val_0 val_0 val_0 val_0 val_0 val_0 val_0 val_0 +0 val_0 val_0 val_0 val_0 val_0 val_0 val_0 val_0 +0 val_0 val_0 val_0 val_0 val_0 val_0 val_0 val_0 +10 val_10 val_10 val_10 val_10 val_10 val_10 val_10 val_10 +100 val_100 val_100 val_100 val_100 val_100 val_100 val_100 val_100 +100 val_100 val_100 val_100 val_100 val_100 val_100 val_100 val_100 +103 val_103 val_103 val_103 val_103 val_103 val_103 val_103 val_103 +103 val_103 val_103 val_103 val_103 val_103 val_103 val_103 val_103 +104 val_104 val_104 val_104 val_104 val_104 val_104 val_104 val_104 +104 val_104 val_104 val_104 val_104 val_104 val_104 val_104 val_104 +105 val_105 val_105 val_105 val_105 val_105 val_105 val_105 val_105 +11 val_11 val_11 val_11 val_11 val_11 val_11 val_11 val_11 +111 val_111 val_111 val_111 val_111 val_111 val_111 val_111 val_111 +113 val_113 val_113 val_113 val_113 val_113 val_113 val_113 val_113 +113 val_113 val_113 val_113 val_113 val_113 val_113 val_113 val_113 +114 val_114 val_114 val_114 val_114 val_114 val_114 val_114 val_114 +116 val_116 val_116 val_116 val_116 val_116 val_116 val_116 val_116 +118 val_118 val_118 val_118 val_118 val_118 val_118 val_118 val_118 +118 val_118 val_118 val_118 val_118 val_118 val_118 val_118 val_118 +119 val_119 val_119 val_119 val_119 val_119 val_119 val_119 val_119 +119 val_119 val_119 val_119 val_119 val_119 val_119 val_119 val_119 +119 val_119 val_119 val_119 val_119 val_119 val_119 val_119 val_119 +12 val_12 val_12 val_12 val_12 val_12 val_12 val_12 val_12 +12 val_12 val_12 val_12 val_12 val_12 val_12 val_12 val_12 +120 val_120 val_120 val_120 val_120 val_120 val_120 val_120 val_120 +120 val_120 val_120 val_120 val_120 val_120 val_120 val_120 val_120 +125 val_125 val_125 val_125 val_125 val_125 val_125 val_125 val_125 +125 val_125 val_125 val_125 val_125 val_125 val_125 val_125 val_125 +126 val_126 val_126 val_126 val_126 val_126 val_126 val_126 val_126 +128 val_128 val_128 val_128 val_128 val_128 val_128 val_128 val_128 +128 val_128 val_128 val_128 val_128 val_128 val_128 val_128 val_128 +128 val_128 val_128 val_128 val_128 val_128 val_128 val_128 val_128 +129 val_129 val_129 val_129 val_129 
val_129 val_129 val_129 val_129 +129 val_129 val_129 val_129 val_129 val_129 val_129 val_129 val_129 +131 val_131 val_131 val_131 val_131 val_131 val_131 val_131 val_131 +133 val_133 val_133 val_133 val_133 val_133 val_133 val_133 val_133 +134 val_134 val_134 val_134 val_134 val_134 val_134 val_134 val_134 +134 val_134 val_134 val_134 val_134 val_134 val_134 val_134 val_134 +136 val_136 val_136 val_136 val_136 val_136 val_136 val_136 val_136 +137 val_137 val_137 val_137 val_137 val_137 val_137 val_137 val_137 +137 val_137 val_137 val_137 val_137 val_137 val_137 val_137 val_137 +138 val_138 val_138 val_138 val_138 val_138 val_138 val_138 val_138 +138 val_138 val_138 val_138 val_138 val_138 val_138 val_138 val_138 +138 val_138 val_138 val_138 val_138 val_138 val_138 val_138 val_138 +138 val_138 val_138 val_138 val_138 val_138 val_138 val_138 val_138 +143 val_143 val_143 val_143 val_143 val_143 val_143 val_143 val_143 +145 val_145 val_145 val_145 val_145 val_145 val_145 val_145 val_145 +146 val_146 val_146 val_146 val_146 val_146 val_146 val_146 val_146 +146 val_146 val_146 val_146 val_146 val_146 val_146 val_146 val_146 +149 val_149 val_149 val_149 val_149 val_149 val_149 val_149 val_149 +149 val_149 val_149 val_149 val_149 val_149 val_149 val_149 val_149 +15 val_15 val_15 val_15 val_15 val_15 val_15 val_15 val_15 +15 val_15 val_15 val_15 val_15 val_15 val_15 val_15 val_15 +150 val_150 val_150 val_150 val_150 val_150 val_150 val_150 val_150 +152 val_152 val_152 val_152 val_152 val_152 val_152 val_152 val_152 +152 val_152 val_152 val_152 val_152 val_152 val_152 val_152 val_152 +153 val_153 val_153 val_153 val_153 val_153 val_153 val_153 val_153 +155 val_155 val_155 val_155 val_155 val_155 val_155 val_155 val_155 +156 val_156 val_156 val_156 val_156 val_156 val_156 val_156 val_156 +157 val_157 val_157 val_157 val_157 val_157 val_157 val_157 val_157 +158 val_158 val_158 val_158 val_158 val_158 val_158 val_158 val_158 +160 val_160 val_160 val_160 val_160 val_160 val_160 val_160 val_160 +162 val_162 val_162 val_162 val_162 val_162 val_162 val_162 val_162 +163 val_163 val_163 val_163 val_163 val_163 val_163 val_163 val_163 +164 val_164 val_164 val_164 val_164 val_164 val_164 val_164 val_164 +164 val_164 val_164 val_164 val_164 val_164 val_164 val_164 val_164 +165 val_165 val_165 val_165 val_165 val_165 val_165 val_165 val_165 +165 val_165 val_165 val_165 val_165 val_165 val_165 val_165 val_165 +166 val_166 val_166 val_166 val_166 val_166 val_166 val_166 val_166 +167 val_167 val_167 val_167 val_167 val_167 val_167 val_167 val_167 +167 val_167 val_167 val_167 val_167 val_167 val_167 val_167 val_167 +167 val_167 val_167 val_167 val_167 val_167 val_167 val_167 val_167 +168 val_168 val_168 val_168 val_168 val_168 val_168 val_168 val_168 +169 val_169 val_169 val_169 val_169 val_169 val_169 val_169 val_169 +169 val_169 val_169 val_169 val_169 val_169 val_169 val_169 val_169 +169 val_169 val_169 val_169 val_169 val_169 val_169 val_169 val_169 +169 val_169 val_169 val_169 val_169 val_169 val_169 val_169 val_169 +17 val_17 val_17 val_17 val_17 val_17 val_17 val_17 val_17 +170 val_170 val_170 val_170 val_170 val_170 val_170 val_170 val_170 +172 val_172 val_172 val_172 val_172 val_172 val_172 val_172 val_172 +172 val_172 val_172 val_172 val_172 val_172 val_172 val_172 val_172 +174 val_174 val_174 val_174 val_174 val_174 val_174 val_174 val_174 +174 val_174 val_174 val_174 val_174 val_174 val_174 val_174 val_174 +175 val_175 val_175 val_175 val_175 val_175 val_175 val_175 val_175 +175 val_175 val_175 val_175 
val_175 val_175 val_175 val_175 val_175 +176 val_176 val_176 val_176 val_176 val_176 val_176 val_176 val_176 +176 val_176 val_176 val_176 val_176 val_176 val_176 val_176 val_176 +177 val_177 val_177 val_177 val_177 val_177 val_177 val_177 val_177 +178 val_178 val_178 val_178 val_178 val_178 val_178 val_178 val_178 +179 val_179 val_179 val_179 val_179 val_179 val_179 val_179 val_179 +179 val_179 val_179 val_179 val_179 val_179 val_179 val_179 val_179 +18 val_18 val_18 val_18 val_18 val_18 val_18 val_18 val_18 +18 val_18 val_18 val_18 val_18 val_18 val_18 val_18 val_18 +180 val_180 val_180 val_180 val_180 val_180 val_180 val_180 val_180 +181 val_181 val_181 val_181 val_181 val_181 val_181 val_181 val_181 +183 val_183 val_183 val_183 val_183 val_183 val_183 val_183 val_183 +186 val_186 val_186 val_186 val_186 val_186 val_186 val_186 val_186 +187 val_187 val_187 val_187 val_187 val_187 val_187 val_187 val_187 +187 val_187 val_187 val_187 val_187 val_187 val_187 val_187 val_187 +187 val_187 val_187 val_187 val_187 val_187 val_187 val_187 val_187 +PREHOOK: query: -- flush for group-by +explain +select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) limit 100 +PREHOOK: type: QUERY +POSTHOOK: query: -- flush for group-by +explain +select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) limit 100 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_TABLE_OR_COL key)) sum)) (TOK_GROUPBY (TOK_FUNCTION concat (TOK_TABLE_OR_COL key) (TOK_TABLE_OR_COL value) (TOK_TABLE_OR_COL value) (TOK_TABLE_OR_COL value) (TOK_TABLE_OR_COL value) (TOK_TABLE_OR_COL value) (TOK_TABLE_OR_COL value) (TOK_TABLE_OR_COL value) (TOK_TABLE_OR_COL value) (TOK_TABLE_OR_COL value))) (TOK_LIMIT 100))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: key, value + Reduce Output Operator + key expressions: + expr: concat(key, value, value, value, value, value, value, value, value, value) + type: string + sort order: + + Map-reduce partition columns: + expr: concat(key, value, value, value, value, value, value, value, value, value) + type: string + tag: -1 + TopN: 100 + TopN Hash Memory Usage: 2.0E-5 + value expressions: + expr: key + type: string + Reduce Operator Tree: + Group By Operator + aggregations: + expr: sum(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: complete + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: _col1 + type: double + outputColumnNames: _col0 + Limit + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 100 + +PREHOOK: query: select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) limit 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select sum(key) as sum from src group by 
concat(key,value,value,value,value,value,value,value,value,value) limit 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0.0 +200.0 +206.0 +208.0 +105.0 +10.0 +111.0 +226.0 +114.0 +116.0 +236.0 +357.0 +11.0 +240.0 +250.0 +126.0 +384.0 +258.0 +24.0 +131.0 +133.0 +268.0 +136.0 +274.0 +552.0 +143.0 +145.0 +292.0 +298.0 +150.0 +304.0 +153.0 +155.0 +156.0 +157.0 +158.0 +30.0 +160.0 +162.0 +163.0 +328.0 +330.0 +166.0 +501.0 +168.0 +676.0 +170.0 +344.0 +348.0 +350.0 +352.0 +177.0 +178.0 +358.0 +17.0 +180.0 +181.0 +183.0 +186.0 +561.0 +189.0 +36.0 +190.0 +382.0 +192.0 +579.0 +194.0 +390.0 +196.0 +394.0 +597.0 +19.0 +400.0 +201.0 +202.0 +406.0 +410.0 +414.0 +624.0 +418.0 +20.0 +426.0 +214.0 +432.0 +434.0 +218.0 +438.0 +442.0 +222.0 +446.0 +448.0 +226.0 +228.0 +458.0 +1150.0 +466.0 +235.0 +474.0 +476.0 +478.0 Index: ql/src/test/results/clientpositive/tez/load_dyn_part1.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/load_dyn_part1.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/load_dyn_part1.q.out (working copy) @@ -0,0 +1,2338 @@ +PREHOOK: query: show partitions srcpart +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: show partitions srcpart +POSTHOOK: type: SHOWPARTITIONS +ds=2008-04-08/hr=11 +ds=2008-04-08/hr=12 +ds=2008-04-09/hr=11 +ds=2008-04-09/hr=12 +PREHOOK: query: create table if not exists nzhang_part1 like srcpart +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table if not exists nzhang_part1 like srcpart +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@nzhang_part1 +PREHOOK: query: create table if not exists nzhang_part2 like srcpart +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table if not exists nzhang_part2 like srcpart +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@nzhang_part2 +PREHOOK: query: describe extended nzhang_part1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: describe extended nzhang_part1 +POSTHOOK: type: DESCTABLE +key string default +value string default +ds string None +hr string None + +# Partition Information +# col_name data_type comment + +ds string None +hr string None + +#### A masked pattern was here #### +PREHOOK: query: explain +from srcpart +insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' +insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: explain +from srcpart +insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' +insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08' +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME nzhang_part1) (TOK_PARTSPEC (TOK_PARTVAL ds) (TOK_PARTVAL hr)))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value)) (TOK_SELEXPR (TOK_TABLE_OR_COL ds)) (TOK_SELEXPR (TOK_TABLE_OR_COL hr))) (TOK_WHERE (<= (TOK_TABLE_OR_COL ds) '2008-04-08'))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME nzhang_part2) (TOK_PARTSPEC (TOK_PARTVAL ds '2008-12-31') (TOK_PARTVAL hr)))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value)) (TOK_SELEXPR (TOK_TABLE_OR_COL hr))) (TOK_WHERE (> (TOK_TABLE_OR_COL ds) '2008-04-08')))) + +STAGE DEPENDENCIES: + Stage-2 is a root 
stage + Stage-9 depends on stages: Stage-2 , consists of Stage-6, Stage-5, Stage-7 + Stage-6 + Stage-3 depends on stages: Stage-6, Stage-5, Stage-8, Stage-12, Stage-11, Stage-14 + Stage-0 depends on stages: Stage-3 + Stage-4 depends on stages: Stage-0 + Stage-1 depends on stages: Stage-3 + Stage-10 depends on stages: Stage-1 + Stage-5 + Stage-7 + Stage-8 depends on stages: Stage-7 + Stage-15 depends on stages: Stage-2 , consists of Stage-12, Stage-11, Stage-13 + Stage-12 + Stage-11 + Stage-13 + Stage-14 depends on stages: Stage-13 + +STAGE PLANS: + Stage: Stage-2 + Tez + Alias -> Map Operator Tree: + srcpart + TableScan + alias: srcpart + Filter Operator + predicate: + expr: (ds <= '2008-04-08') + type: boolean + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + expr: ds + type: string + expr: hr + type: string + outputColumnNames: _col0, _col1, _col2, _col3 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part1 + Filter Operator + predicate: + expr: (ds > '2008-04-08') + type: boolean + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + expr: hr + type: string + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + GlobalTableId: 2 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part2 + + Stage: Stage-9 + Conditional Operator + + Stage: Stage-6 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-3 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ds + hr + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part1 + + Stage: Stage-4 + Stats-Aggr Operator + + Stage: Stage-1 + Move Operator + tables: + partition: + ds 2008-12-31 + hr + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part2 + + Stage: Stage-10 + Stats-Aggr Operator + + Stage: Stage-5 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part1 + + Stage: Stage-7 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part1 + + Stage: Stage-8 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: 
Stage-15 + Conditional Operator + + Stage: Stage-12 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-11 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part2 + + Stage: Stage-13 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part2 + + Stage: Stage-14 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: from srcpart +insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' +insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@nzhang_part1 +PREHOOK: Output: default@nzhang_part2@ds=2008-12-31 +POSTHOOK: query: from srcpart +insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' +insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@nzhang_part1@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@nzhang_part1@ds=2008-04-08/hr=12 +POSTHOOK: Output: default@nzhang_part2@ds=2008-12-31/hr=11 +POSTHOOK: Output: default@nzhang_part2@ds=2008-12-31/hr=12 +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE 
[(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show partitions nzhang_part1 +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: show partitions nzhang_part1 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +ds=2008-04-08/hr=11 +ds=2008-04-08/hr=12 +PREHOOK: query: show partitions nzhang_part2 +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: show partitions nzhang_part2 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +ds=2008-12-31/hr=11 +ds=2008-12-31/hr=12 +PREHOOK: query: select * from nzhang_part1 where ds is not null and hr is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@nzhang_part1 +PREHOOK: Input: default@nzhang_part1@ds=2008-04-08/hr=11 +PREHOOK: Input: default@nzhang_part1@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select * from nzhang_part1 where ds is not null and hr is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@nzhang_part1 +POSTHOOK: Input: default@nzhang_part1@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@nzhang_part1@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE 
[(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +238 val_238 2008-04-08 11 +86 val_86 2008-04-08 11 +311 val_311 2008-04-08 11 +27 val_27 2008-04-08 11 +165 val_165 2008-04-08 11 +409 val_409 2008-04-08 11 +255 val_255 2008-04-08 11 +278 val_278 2008-04-08 11 +98 val_98 2008-04-08 11 +484 val_484 2008-04-08 11 +265 val_265 2008-04-08 11 +193 val_193 2008-04-08 11 +401 val_401 2008-04-08 11 +150 val_150 2008-04-08 11 +273 val_273 2008-04-08 11 +224 val_224 2008-04-08 11 +369 val_369 2008-04-08 11 +66 val_66 2008-04-08 11 +128 val_128 2008-04-08 11 +213 val_213 2008-04-08 11 +146 val_146 2008-04-08 11 +406 val_406 2008-04-08 11 +429 val_429 2008-04-08 11 +374 val_374 2008-04-08 11 +152 val_152 2008-04-08 11 +469 val_469 2008-04-08 11 +145 val_145 2008-04-08 11 +495 val_495 2008-04-08 11 +37 val_37 2008-04-08 11 +327 val_327 2008-04-08 11 +281 val_281 2008-04-08 11 +277 val_277 2008-04-08 11 +209 val_209 2008-04-08 11 +15 val_15 2008-04-08 11 +82 val_82 2008-04-08 11 +403 val_403 2008-04-08 11 +166 val_166 2008-04-08 11 +417 val_417 2008-04-08 11 +430 val_430 2008-04-08 11 +252 val_252 2008-04-08 11 +292 val_292 2008-04-08 11 +219 val_219 2008-04-08 11 +287 val_287 2008-04-08 11 +153 val_153 2008-04-08 11 +193 val_193 2008-04-08 11 +338 val_338 2008-04-08 11 +446 val_446 2008-04-08 11 +459 val_459 2008-04-08 11 +394 val_394 2008-04-08 11 +237 val_237 2008-04-08 11 +482 val_482 2008-04-08 11 +174 val_174 2008-04-08 11 +413 val_413 2008-04-08 11 +494 val_494 2008-04-08 11 +207 val_207 2008-04-08 11 +199 val_199 2008-04-08 11 +466 val_466 2008-04-08 11 +208 val_208 2008-04-08 11 +174 val_174 2008-04-08 11 +399 val_399 2008-04-08 11 +396 val_396 2008-04-08 11 +247 val_247 2008-04-08 11 +417 val_417 2008-04-08 11 +489 val_489 2008-04-08 11 +162 val_162 2008-04-08 11 +377 val_377 2008-04-08 11 +397 val_397 2008-04-08 11 +309 val_309 2008-04-08 11 +365 val_365 2008-04-08 11 +266 val_266 2008-04-08 11 +439 val_439 2008-04-08 11 +342 val_342 2008-04-08 11 +367 val_367 2008-04-08 11 +325 val_325 2008-04-08 11 +167 val_167 2008-04-08 11 +195 val_195 2008-04-08 11 +475 val_475 2008-04-08 11 +17 val_17 2008-04-08 11 +113 val_113 2008-04-08 11 +155 val_155 2008-04-08 11 +203 val_203 2008-04-08 11 +339 val_339 2008-04-08 11 +0 val_0 2008-04-08 11 +455 val_455 2008-04-08 11 +128 val_128 2008-04-08 11 +311 val_311 2008-04-08 11 +316 val_316 2008-04-08 11 +57 val_57 2008-04-08 11 +302 val_302 2008-04-08 11 +205 val_205 2008-04-08 11 +149 val_149 
2008-04-08 11 +438 val_438 2008-04-08 11 +345 val_345 2008-04-08 11 +129 val_129 2008-04-08 11 +170 val_170 2008-04-08 11 +20 val_20 2008-04-08 11 +489 val_489 2008-04-08 11 +157 val_157 2008-04-08 11 +378 val_378 2008-04-08 11 +221 val_221 2008-04-08 11 +92 val_92 2008-04-08 11 +111 val_111 2008-04-08 11 +47 val_47 2008-04-08 11 +72 val_72 2008-04-08 11 +4 val_4 2008-04-08 11 +280 val_280 2008-04-08 11 +35 val_35 2008-04-08 11 +427 val_427 2008-04-08 11 +277 val_277 2008-04-08 11 +208 val_208 2008-04-08 11 +356 val_356 2008-04-08 11 +399 val_399 2008-04-08 11 +169 val_169 2008-04-08 11 +382 val_382 2008-04-08 11 +498 val_498 2008-04-08 11 +125 val_125 2008-04-08 11 +386 val_386 2008-04-08 11 +437 val_437 2008-04-08 11 +469 val_469 2008-04-08 11 +192 val_192 2008-04-08 11 +286 val_286 2008-04-08 11 +187 val_187 2008-04-08 11 +176 val_176 2008-04-08 11 +54 val_54 2008-04-08 11 +459 val_459 2008-04-08 11 +51 val_51 2008-04-08 11 +138 val_138 2008-04-08 11 +103 val_103 2008-04-08 11 +239 val_239 2008-04-08 11 +213 val_213 2008-04-08 11 +216 val_216 2008-04-08 11 +430 val_430 2008-04-08 11 +278 val_278 2008-04-08 11 +176 val_176 2008-04-08 11 +289 val_289 2008-04-08 11 +221 val_221 2008-04-08 11 +65 val_65 2008-04-08 11 +318 val_318 2008-04-08 11 +332 val_332 2008-04-08 11 +311 val_311 2008-04-08 11 +275 val_275 2008-04-08 11 +137 val_137 2008-04-08 11 +241 val_241 2008-04-08 11 +83 val_83 2008-04-08 11 +333 val_333 2008-04-08 11 +180 val_180 2008-04-08 11 +284 val_284 2008-04-08 11 +12 val_12 2008-04-08 11 +230 val_230 2008-04-08 11 +181 val_181 2008-04-08 11 +67 val_67 2008-04-08 11 +260 val_260 2008-04-08 11 +404 val_404 2008-04-08 11 +384 val_384 2008-04-08 11 +489 val_489 2008-04-08 11 +353 val_353 2008-04-08 11 +373 val_373 2008-04-08 11 +272 val_272 2008-04-08 11 +138 val_138 2008-04-08 11 +217 val_217 2008-04-08 11 +84 val_84 2008-04-08 11 +348 val_348 2008-04-08 11 +466 val_466 2008-04-08 11 +58 val_58 2008-04-08 11 +8 val_8 2008-04-08 11 +411 val_411 2008-04-08 11 +230 val_230 2008-04-08 11 +208 val_208 2008-04-08 11 +348 val_348 2008-04-08 11 +24 val_24 2008-04-08 11 +463 val_463 2008-04-08 11 +431 val_431 2008-04-08 11 +179 val_179 2008-04-08 11 +172 val_172 2008-04-08 11 +42 val_42 2008-04-08 11 +129 val_129 2008-04-08 11 +158 val_158 2008-04-08 11 +119 val_119 2008-04-08 11 +496 val_496 2008-04-08 11 +0 val_0 2008-04-08 11 +322 val_322 2008-04-08 11 +197 val_197 2008-04-08 11 +468 val_468 2008-04-08 11 +393 val_393 2008-04-08 11 +454 val_454 2008-04-08 11 +100 val_100 2008-04-08 11 +298 val_298 2008-04-08 11 +199 val_199 2008-04-08 11 +191 val_191 2008-04-08 11 +418 val_418 2008-04-08 11 +96 val_96 2008-04-08 11 +26 val_26 2008-04-08 11 +165 val_165 2008-04-08 11 +327 val_327 2008-04-08 11 +230 val_230 2008-04-08 11 +205 val_205 2008-04-08 11 +120 val_120 2008-04-08 11 +131 val_131 2008-04-08 11 +51 val_51 2008-04-08 11 +404 val_404 2008-04-08 11 +43 val_43 2008-04-08 11 +436 val_436 2008-04-08 11 +156 val_156 2008-04-08 11 +469 val_469 2008-04-08 11 +468 val_468 2008-04-08 11 +308 val_308 2008-04-08 11 +95 val_95 2008-04-08 11 +196 val_196 2008-04-08 11 +288 val_288 2008-04-08 11 +481 val_481 2008-04-08 11 +457 val_457 2008-04-08 11 +98 val_98 2008-04-08 11 +282 val_282 2008-04-08 11 +197 val_197 2008-04-08 11 +187 val_187 2008-04-08 11 +318 val_318 2008-04-08 11 +318 val_318 2008-04-08 11 +409 val_409 2008-04-08 11 +470 val_470 2008-04-08 11 +137 val_137 2008-04-08 11 +369 val_369 2008-04-08 11 +316 val_316 2008-04-08 11 +169 val_169 2008-04-08 11 +413 val_413 2008-04-08 11 +85 
val_85 2008-04-08 11 +77 val_77 2008-04-08 11 +0 val_0 2008-04-08 11 +490 val_490 2008-04-08 11 +87 val_87 2008-04-08 11 +364 val_364 2008-04-08 11 +179 val_179 2008-04-08 11 +118 val_118 2008-04-08 11 +134 val_134 2008-04-08 11 +395 val_395 2008-04-08 11 +282 val_282 2008-04-08 11 +138 val_138 2008-04-08 11 +238 val_238 2008-04-08 11 +419 val_419 2008-04-08 11 +15 val_15 2008-04-08 11 +118 val_118 2008-04-08 11 +72 val_72 2008-04-08 11 +90 val_90 2008-04-08 11 +307 val_307 2008-04-08 11 +19 val_19 2008-04-08 11 +435 val_435 2008-04-08 11 +10 val_10 2008-04-08 11 +277 val_277 2008-04-08 11 +273 val_273 2008-04-08 11 +306 val_306 2008-04-08 11 +224 val_224 2008-04-08 11 +309 val_309 2008-04-08 11 +389 val_389 2008-04-08 11 +327 val_327 2008-04-08 11 +242 val_242 2008-04-08 11 +369 val_369 2008-04-08 11 +392 val_392 2008-04-08 11 +272 val_272 2008-04-08 11 +331 val_331 2008-04-08 11 +401 val_401 2008-04-08 11 +242 val_242 2008-04-08 11 +452 val_452 2008-04-08 11 +177 val_177 2008-04-08 11 +226 val_226 2008-04-08 11 +5 val_5 2008-04-08 11 +497 val_497 2008-04-08 11 +402 val_402 2008-04-08 11 +396 val_396 2008-04-08 11 +317 val_317 2008-04-08 11 +395 val_395 2008-04-08 11 +58 val_58 2008-04-08 11 +35 val_35 2008-04-08 11 +336 val_336 2008-04-08 11 +95 val_95 2008-04-08 11 +11 val_11 2008-04-08 11 +168 val_168 2008-04-08 11 +34 val_34 2008-04-08 11 +229 val_229 2008-04-08 11 +233 val_233 2008-04-08 11 +143 val_143 2008-04-08 11 +472 val_472 2008-04-08 11 +322 val_322 2008-04-08 11 +498 val_498 2008-04-08 11 +160 val_160 2008-04-08 11 +195 val_195 2008-04-08 11 +42 val_42 2008-04-08 11 +321 val_321 2008-04-08 11 +430 val_430 2008-04-08 11 +119 val_119 2008-04-08 11 +489 val_489 2008-04-08 11 +458 val_458 2008-04-08 11 +78 val_78 2008-04-08 11 +76 val_76 2008-04-08 11 +41 val_41 2008-04-08 11 +223 val_223 2008-04-08 11 +492 val_492 2008-04-08 11 +149 val_149 2008-04-08 11 +449 val_449 2008-04-08 11 +218 val_218 2008-04-08 11 +228 val_228 2008-04-08 11 +138 val_138 2008-04-08 11 +453 val_453 2008-04-08 11 +30 val_30 2008-04-08 11 +209 val_209 2008-04-08 11 +64 val_64 2008-04-08 11 +468 val_468 2008-04-08 11 +76 val_76 2008-04-08 11 +74 val_74 2008-04-08 11 +342 val_342 2008-04-08 11 +69 val_69 2008-04-08 11 +230 val_230 2008-04-08 11 +33 val_33 2008-04-08 11 +368 val_368 2008-04-08 11 +103 val_103 2008-04-08 11 +296 val_296 2008-04-08 11 +113 val_113 2008-04-08 11 +216 val_216 2008-04-08 11 +367 val_367 2008-04-08 11 +344 val_344 2008-04-08 11 +167 val_167 2008-04-08 11 +274 val_274 2008-04-08 11 +219 val_219 2008-04-08 11 +239 val_239 2008-04-08 11 +485 val_485 2008-04-08 11 +116 val_116 2008-04-08 11 +223 val_223 2008-04-08 11 +256 val_256 2008-04-08 11 +263 val_263 2008-04-08 11 +70 val_70 2008-04-08 11 +487 val_487 2008-04-08 11 +480 val_480 2008-04-08 11 +401 val_401 2008-04-08 11 +288 val_288 2008-04-08 11 +191 val_191 2008-04-08 11 +5 val_5 2008-04-08 11 +244 val_244 2008-04-08 11 +438 val_438 2008-04-08 11 +128 val_128 2008-04-08 11 +467 val_467 2008-04-08 11 +432 val_432 2008-04-08 11 +202 val_202 2008-04-08 11 +316 val_316 2008-04-08 11 +229 val_229 2008-04-08 11 +469 val_469 2008-04-08 11 +463 val_463 2008-04-08 11 +280 val_280 2008-04-08 11 +2 val_2 2008-04-08 11 +35 val_35 2008-04-08 11 +283 val_283 2008-04-08 11 +331 val_331 2008-04-08 11 +235 val_235 2008-04-08 11 +80 val_80 2008-04-08 11 +44 val_44 2008-04-08 11 +193 val_193 2008-04-08 11 +321 val_321 2008-04-08 11 +335 val_335 2008-04-08 11 +104 val_104 2008-04-08 11 +466 val_466 2008-04-08 11 +366 val_366 2008-04-08 11 +175 
val_175 2008-04-08 11 +403 val_403 2008-04-08 11 +483 val_483 2008-04-08 11 +53 val_53 2008-04-08 11 +105 val_105 2008-04-08 11 +257 val_257 2008-04-08 11 +406 val_406 2008-04-08 11 +409 val_409 2008-04-08 11 +190 val_190 2008-04-08 11 +406 val_406 2008-04-08 11 +401 val_401 2008-04-08 11 +114 val_114 2008-04-08 11 +258 val_258 2008-04-08 11 +90 val_90 2008-04-08 11 +203 val_203 2008-04-08 11 +262 val_262 2008-04-08 11 +348 val_348 2008-04-08 11 +424 val_424 2008-04-08 11 +12 val_12 2008-04-08 11 +396 val_396 2008-04-08 11 +201 val_201 2008-04-08 11 +217 val_217 2008-04-08 11 +164 val_164 2008-04-08 11 +431 val_431 2008-04-08 11 +454 val_454 2008-04-08 11 +478 val_478 2008-04-08 11 +298 val_298 2008-04-08 11 +125 val_125 2008-04-08 11 +431 val_431 2008-04-08 11 +164 val_164 2008-04-08 11 +424 val_424 2008-04-08 11 +187 val_187 2008-04-08 11 +382 val_382 2008-04-08 11 +5 val_5 2008-04-08 11 +70 val_70 2008-04-08 11 +397 val_397 2008-04-08 11 +480 val_480 2008-04-08 11 +291 val_291 2008-04-08 11 +24 val_24 2008-04-08 11 +351 val_351 2008-04-08 11 +255 val_255 2008-04-08 11 +104 val_104 2008-04-08 11 +70 val_70 2008-04-08 11 +163 val_163 2008-04-08 11 +438 val_438 2008-04-08 11 +119 val_119 2008-04-08 11 +414 val_414 2008-04-08 11 +200 val_200 2008-04-08 11 +491 val_491 2008-04-08 11 +237 val_237 2008-04-08 11 +439 val_439 2008-04-08 11 +360 val_360 2008-04-08 11 +248 val_248 2008-04-08 11 +479 val_479 2008-04-08 11 +305 val_305 2008-04-08 11 +417 val_417 2008-04-08 11 +199 val_199 2008-04-08 11 +444 val_444 2008-04-08 11 +120 val_120 2008-04-08 11 +429 val_429 2008-04-08 11 +169 val_169 2008-04-08 11 +443 val_443 2008-04-08 11 +323 val_323 2008-04-08 11 +325 val_325 2008-04-08 11 +277 val_277 2008-04-08 11 +230 val_230 2008-04-08 11 +478 val_478 2008-04-08 11 +178 val_178 2008-04-08 11 +468 val_468 2008-04-08 11 +310 val_310 2008-04-08 11 +317 val_317 2008-04-08 11 +333 val_333 2008-04-08 11 +493 val_493 2008-04-08 11 +460 val_460 2008-04-08 11 +207 val_207 2008-04-08 11 +249 val_249 2008-04-08 11 +265 val_265 2008-04-08 11 +480 val_480 2008-04-08 11 +83 val_83 2008-04-08 11 +136 val_136 2008-04-08 11 +353 val_353 2008-04-08 11 +172 val_172 2008-04-08 11 +214 val_214 2008-04-08 11 +462 val_462 2008-04-08 11 +233 val_233 2008-04-08 11 +406 val_406 2008-04-08 11 +133 val_133 2008-04-08 11 +175 val_175 2008-04-08 11 +189 val_189 2008-04-08 11 +454 val_454 2008-04-08 11 +375 val_375 2008-04-08 11 +401 val_401 2008-04-08 11 +421 val_421 2008-04-08 11 +407 val_407 2008-04-08 11 +384 val_384 2008-04-08 11 +256 val_256 2008-04-08 11 +26 val_26 2008-04-08 11 +134 val_134 2008-04-08 11 +67 val_67 2008-04-08 11 +384 val_384 2008-04-08 11 +379 val_379 2008-04-08 11 +18 val_18 2008-04-08 11 +462 val_462 2008-04-08 11 +492 val_492 2008-04-08 11 +100 val_100 2008-04-08 11 +298 val_298 2008-04-08 11 +9 val_9 2008-04-08 11 +341 val_341 2008-04-08 11 +498 val_498 2008-04-08 11 +146 val_146 2008-04-08 11 +458 val_458 2008-04-08 11 +362 val_362 2008-04-08 11 +186 val_186 2008-04-08 11 +285 val_285 2008-04-08 11 +348 val_348 2008-04-08 11 +167 val_167 2008-04-08 11 +18 val_18 2008-04-08 11 +273 val_273 2008-04-08 11 +183 val_183 2008-04-08 11 +281 val_281 2008-04-08 11 +344 val_344 2008-04-08 11 +97 val_97 2008-04-08 11 +469 val_469 2008-04-08 11 +315 val_315 2008-04-08 11 +84 val_84 2008-04-08 11 +28 val_28 2008-04-08 11 +37 val_37 2008-04-08 11 +448 val_448 2008-04-08 11 +152 val_152 2008-04-08 11 +348 val_348 2008-04-08 11 +307 val_307 2008-04-08 11 +194 val_194 2008-04-08 11 +414 val_414 2008-04-08 11 +477 
val_477 2008-04-08 11 +222 val_222 2008-04-08 11 +126 val_126 2008-04-08 11 +90 val_90 2008-04-08 11 +169 val_169 2008-04-08 11 +403 val_403 2008-04-08 11 +400 val_400 2008-04-08 11 +200 val_200 2008-04-08 11 +97 val_97 2008-04-08 11 +238 val_238 2008-04-08 12 +86 val_86 2008-04-08 12 +311 val_311 2008-04-08 12 +27 val_27 2008-04-08 12 +165 val_165 2008-04-08 12 +409 val_409 2008-04-08 12 +255 val_255 2008-04-08 12 +278 val_278 2008-04-08 12 +98 val_98 2008-04-08 12 +484 val_484 2008-04-08 12 +265 val_265 2008-04-08 12 +193 val_193 2008-04-08 12 +401 val_401 2008-04-08 12 +150 val_150 2008-04-08 12 +273 val_273 2008-04-08 12 +224 val_224 2008-04-08 12 +369 val_369 2008-04-08 12 +66 val_66 2008-04-08 12 +128 val_128 2008-04-08 12 +213 val_213 2008-04-08 12 +146 val_146 2008-04-08 12 +406 val_406 2008-04-08 12 +429 val_429 2008-04-08 12 +374 val_374 2008-04-08 12 +152 val_152 2008-04-08 12 +469 val_469 2008-04-08 12 +145 val_145 2008-04-08 12 +495 val_495 2008-04-08 12 +37 val_37 2008-04-08 12 +327 val_327 2008-04-08 12 +281 val_281 2008-04-08 12 +277 val_277 2008-04-08 12 +209 val_209 2008-04-08 12 +15 val_15 2008-04-08 12 +82 val_82 2008-04-08 12 +403 val_403 2008-04-08 12 +166 val_166 2008-04-08 12 +417 val_417 2008-04-08 12 +430 val_430 2008-04-08 12 +252 val_252 2008-04-08 12 +292 val_292 2008-04-08 12 +219 val_219 2008-04-08 12 +287 val_287 2008-04-08 12 +153 val_153 2008-04-08 12 +193 val_193 2008-04-08 12 +338 val_338 2008-04-08 12 +446 val_446 2008-04-08 12 +459 val_459 2008-04-08 12 +394 val_394 2008-04-08 12 +237 val_237 2008-04-08 12 +482 val_482 2008-04-08 12 +174 val_174 2008-04-08 12 +413 val_413 2008-04-08 12 +494 val_494 2008-04-08 12 +207 val_207 2008-04-08 12 +199 val_199 2008-04-08 12 +466 val_466 2008-04-08 12 +208 val_208 2008-04-08 12 +174 val_174 2008-04-08 12 +399 val_399 2008-04-08 12 +396 val_396 2008-04-08 12 +247 val_247 2008-04-08 12 +417 val_417 2008-04-08 12 +489 val_489 2008-04-08 12 +162 val_162 2008-04-08 12 +377 val_377 2008-04-08 12 +397 val_397 2008-04-08 12 +309 val_309 2008-04-08 12 +365 val_365 2008-04-08 12 +266 val_266 2008-04-08 12 +439 val_439 2008-04-08 12 +342 val_342 2008-04-08 12 +367 val_367 2008-04-08 12 +325 val_325 2008-04-08 12 +167 val_167 2008-04-08 12 +195 val_195 2008-04-08 12 +475 val_475 2008-04-08 12 +17 val_17 2008-04-08 12 +113 val_113 2008-04-08 12 +155 val_155 2008-04-08 12 +203 val_203 2008-04-08 12 +339 val_339 2008-04-08 12 +0 val_0 2008-04-08 12 +455 val_455 2008-04-08 12 +128 val_128 2008-04-08 12 +311 val_311 2008-04-08 12 +316 val_316 2008-04-08 12 +57 val_57 2008-04-08 12 +302 val_302 2008-04-08 12 +205 val_205 2008-04-08 12 +149 val_149 2008-04-08 12 +438 val_438 2008-04-08 12 +345 val_345 2008-04-08 12 +129 val_129 2008-04-08 12 +170 val_170 2008-04-08 12 +20 val_20 2008-04-08 12 +489 val_489 2008-04-08 12 +157 val_157 2008-04-08 12 +378 val_378 2008-04-08 12 +221 val_221 2008-04-08 12 +92 val_92 2008-04-08 12 +111 val_111 2008-04-08 12 +47 val_47 2008-04-08 12 +72 val_72 2008-04-08 12 +4 val_4 2008-04-08 12 +280 val_280 2008-04-08 12 +35 val_35 2008-04-08 12 +427 val_427 2008-04-08 12 +277 val_277 2008-04-08 12 +208 val_208 2008-04-08 12 +356 val_356 2008-04-08 12 +399 val_399 2008-04-08 12 +169 val_169 2008-04-08 12 +382 val_382 2008-04-08 12 +498 val_498 2008-04-08 12 +125 val_125 2008-04-08 12 +386 val_386 2008-04-08 12 +437 val_437 2008-04-08 12 +469 val_469 2008-04-08 12 +192 val_192 2008-04-08 12 +286 val_286 2008-04-08 12 +187 val_187 2008-04-08 12 +176 val_176 2008-04-08 12 +54 val_54 2008-04-08 12 +459 
val_459 2008-04-08 12 +51 val_51 2008-04-08 12 +138 val_138 2008-04-08 12 +103 val_103 2008-04-08 12 +239 val_239 2008-04-08 12 +213 val_213 2008-04-08 12 +216 val_216 2008-04-08 12 +430 val_430 2008-04-08 12 +278 val_278 2008-04-08 12 +176 val_176 2008-04-08 12 +289 val_289 2008-04-08 12 +221 val_221 2008-04-08 12 +65 val_65 2008-04-08 12 +318 val_318 2008-04-08 12 +332 val_332 2008-04-08 12 +311 val_311 2008-04-08 12 +275 val_275 2008-04-08 12 +137 val_137 2008-04-08 12 +241 val_241 2008-04-08 12 +83 val_83 2008-04-08 12 +333 val_333 2008-04-08 12 +180 val_180 2008-04-08 12 +284 val_284 2008-04-08 12 +12 val_12 2008-04-08 12 +230 val_230 2008-04-08 12 +181 val_181 2008-04-08 12 +67 val_67 2008-04-08 12 +260 val_260 2008-04-08 12 +404 val_404 2008-04-08 12 +384 val_384 2008-04-08 12 +489 val_489 2008-04-08 12 +353 val_353 2008-04-08 12 +373 val_373 2008-04-08 12 +272 val_272 2008-04-08 12 +138 val_138 2008-04-08 12 +217 val_217 2008-04-08 12 +84 val_84 2008-04-08 12 +348 val_348 2008-04-08 12 +466 val_466 2008-04-08 12 +58 val_58 2008-04-08 12 +8 val_8 2008-04-08 12 +411 val_411 2008-04-08 12 +230 val_230 2008-04-08 12 +208 val_208 2008-04-08 12 +348 val_348 2008-04-08 12 +24 val_24 2008-04-08 12 +463 val_463 2008-04-08 12 +431 val_431 2008-04-08 12 +179 val_179 2008-04-08 12 +172 val_172 2008-04-08 12 +42 val_42 2008-04-08 12 +129 val_129 2008-04-08 12 +158 val_158 2008-04-08 12 +119 val_119 2008-04-08 12 +496 val_496 2008-04-08 12 +0 val_0 2008-04-08 12 +322 val_322 2008-04-08 12 +197 val_197 2008-04-08 12 +468 val_468 2008-04-08 12 +393 val_393 2008-04-08 12 +454 val_454 2008-04-08 12 +100 val_100 2008-04-08 12 +298 val_298 2008-04-08 12 +199 val_199 2008-04-08 12 +191 val_191 2008-04-08 12 +418 val_418 2008-04-08 12 +96 val_96 2008-04-08 12 +26 val_26 2008-04-08 12 +165 val_165 2008-04-08 12 +327 val_327 2008-04-08 12 +230 val_230 2008-04-08 12 +205 val_205 2008-04-08 12 +120 val_120 2008-04-08 12 +131 val_131 2008-04-08 12 +51 val_51 2008-04-08 12 +404 val_404 2008-04-08 12 +43 val_43 2008-04-08 12 +436 val_436 2008-04-08 12 +156 val_156 2008-04-08 12 +469 val_469 2008-04-08 12 +468 val_468 2008-04-08 12 +308 val_308 2008-04-08 12 +95 val_95 2008-04-08 12 +196 val_196 2008-04-08 12 +288 val_288 2008-04-08 12 +481 val_481 2008-04-08 12 +457 val_457 2008-04-08 12 +98 val_98 2008-04-08 12 +282 val_282 2008-04-08 12 +197 val_197 2008-04-08 12 +187 val_187 2008-04-08 12 +318 val_318 2008-04-08 12 +318 val_318 2008-04-08 12 +409 val_409 2008-04-08 12 +470 val_470 2008-04-08 12 +137 val_137 2008-04-08 12 +369 val_369 2008-04-08 12 +316 val_316 2008-04-08 12 +169 val_169 2008-04-08 12 +413 val_413 2008-04-08 12 +85 val_85 2008-04-08 12 +77 val_77 2008-04-08 12 +0 val_0 2008-04-08 12 +490 val_490 2008-04-08 12 +87 val_87 2008-04-08 12 +364 val_364 2008-04-08 12 +179 val_179 2008-04-08 12 +118 val_118 2008-04-08 12 +134 val_134 2008-04-08 12 +395 val_395 2008-04-08 12 +282 val_282 2008-04-08 12 +138 val_138 2008-04-08 12 +238 val_238 2008-04-08 12 +419 val_419 2008-04-08 12 +15 val_15 2008-04-08 12 +118 val_118 2008-04-08 12 +72 val_72 2008-04-08 12 +90 val_90 2008-04-08 12 +307 val_307 2008-04-08 12 +19 val_19 2008-04-08 12 +435 val_435 2008-04-08 12 +10 val_10 2008-04-08 12 +277 val_277 2008-04-08 12 +273 val_273 2008-04-08 12 +306 val_306 2008-04-08 12 +224 val_224 2008-04-08 12 +309 val_309 2008-04-08 12 +389 val_389 2008-04-08 12 +327 val_327 2008-04-08 12 +242 val_242 2008-04-08 12 +369 val_369 2008-04-08 12 +392 val_392 2008-04-08 12 +272 val_272 2008-04-08 12 +331 val_331 2008-04-08 12 
+401 val_401 2008-04-08 12 +242 val_242 2008-04-08 12 +452 val_452 2008-04-08 12 +177 val_177 2008-04-08 12 +226 val_226 2008-04-08 12 +5 val_5 2008-04-08 12 +497 val_497 2008-04-08 12 +402 val_402 2008-04-08 12 +396 val_396 2008-04-08 12 +317 val_317 2008-04-08 12 +395 val_395 2008-04-08 12 +58 val_58 2008-04-08 12 +35 val_35 2008-04-08 12 +336 val_336 2008-04-08 12 +95 val_95 2008-04-08 12 +11 val_11 2008-04-08 12 +168 val_168 2008-04-08 12 +34 val_34 2008-04-08 12 +229 val_229 2008-04-08 12 +233 val_233 2008-04-08 12 +143 val_143 2008-04-08 12 +472 val_472 2008-04-08 12 +322 val_322 2008-04-08 12 +498 val_498 2008-04-08 12 +160 val_160 2008-04-08 12 +195 val_195 2008-04-08 12 +42 val_42 2008-04-08 12 +321 val_321 2008-04-08 12 +430 val_430 2008-04-08 12 +119 val_119 2008-04-08 12 +489 val_489 2008-04-08 12 +458 val_458 2008-04-08 12 +78 val_78 2008-04-08 12 +76 val_76 2008-04-08 12 +41 val_41 2008-04-08 12 +223 val_223 2008-04-08 12 +492 val_492 2008-04-08 12 +149 val_149 2008-04-08 12 +449 val_449 2008-04-08 12 +218 val_218 2008-04-08 12 +228 val_228 2008-04-08 12 +138 val_138 2008-04-08 12 +453 val_453 2008-04-08 12 +30 val_30 2008-04-08 12 +209 val_209 2008-04-08 12 +64 val_64 2008-04-08 12 +468 val_468 2008-04-08 12 +76 val_76 2008-04-08 12 +74 val_74 2008-04-08 12 +342 val_342 2008-04-08 12 +69 val_69 2008-04-08 12 +230 val_230 2008-04-08 12 +33 val_33 2008-04-08 12 +368 val_368 2008-04-08 12 +103 val_103 2008-04-08 12 +296 val_296 2008-04-08 12 +113 val_113 2008-04-08 12 +216 val_216 2008-04-08 12 +367 val_367 2008-04-08 12 +344 val_344 2008-04-08 12 +167 val_167 2008-04-08 12 +274 val_274 2008-04-08 12 +219 val_219 2008-04-08 12 +239 val_239 2008-04-08 12 +485 val_485 2008-04-08 12 +116 val_116 2008-04-08 12 +223 val_223 2008-04-08 12 +256 val_256 2008-04-08 12 +263 val_263 2008-04-08 12 +70 val_70 2008-04-08 12 +487 val_487 2008-04-08 12 +480 val_480 2008-04-08 12 +401 val_401 2008-04-08 12 +288 val_288 2008-04-08 12 +191 val_191 2008-04-08 12 +5 val_5 2008-04-08 12 +244 val_244 2008-04-08 12 +438 val_438 2008-04-08 12 +128 val_128 2008-04-08 12 +467 val_467 2008-04-08 12 +432 val_432 2008-04-08 12 +202 val_202 2008-04-08 12 +316 val_316 2008-04-08 12 +229 val_229 2008-04-08 12 +469 val_469 2008-04-08 12 +463 val_463 2008-04-08 12 +280 val_280 2008-04-08 12 +2 val_2 2008-04-08 12 +35 val_35 2008-04-08 12 +283 val_283 2008-04-08 12 +331 val_331 2008-04-08 12 +235 val_235 2008-04-08 12 +80 val_80 2008-04-08 12 +44 val_44 2008-04-08 12 +193 val_193 2008-04-08 12 +321 val_321 2008-04-08 12 +335 val_335 2008-04-08 12 +104 val_104 2008-04-08 12 +466 val_466 2008-04-08 12 +366 val_366 2008-04-08 12 +175 val_175 2008-04-08 12 +403 val_403 2008-04-08 12 +483 val_483 2008-04-08 12 +53 val_53 2008-04-08 12 +105 val_105 2008-04-08 12 +257 val_257 2008-04-08 12 +406 val_406 2008-04-08 12 +409 val_409 2008-04-08 12 +190 val_190 2008-04-08 12 +406 val_406 2008-04-08 12 +401 val_401 2008-04-08 12 +114 val_114 2008-04-08 12 +258 val_258 2008-04-08 12 +90 val_90 2008-04-08 12 +203 val_203 2008-04-08 12 +262 val_262 2008-04-08 12 +348 val_348 2008-04-08 12 +424 val_424 2008-04-08 12 +12 val_12 2008-04-08 12 +396 val_396 2008-04-08 12 +201 val_201 2008-04-08 12 +217 val_217 2008-04-08 12 +164 val_164 2008-04-08 12 +431 val_431 2008-04-08 12 +454 val_454 2008-04-08 12 +478 val_478 2008-04-08 12 +298 val_298 2008-04-08 12 +125 val_125 2008-04-08 12 +431 val_431 2008-04-08 12 +164 val_164 2008-04-08 12 +424 val_424 2008-04-08 12 +187 val_187 2008-04-08 12 +382 val_382 2008-04-08 12 +5 val_5 2008-04-08 
12 +70 val_70 2008-04-08 12 +397 val_397 2008-04-08 12 +480 val_480 2008-04-08 12 +291 val_291 2008-04-08 12 +24 val_24 2008-04-08 12 +351 val_351 2008-04-08 12 +255 val_255 2008-04-08 12 +104 val_104 2008-04-08 12 +70 val_70 2008-04-08 12 +163 val_163 2008-04-08 12 +438 val_438 2008-04-08 12 +119 val_119 2008-04-08 12 +414 val_414 2008-04-08 12 +200 val_200 2008-04-08 12 +491 val_491 2008-04-08 12 +237 val_237 2008-04-08 12 +439 val_439 2008-04-08 12 +360 val_360 2008-04-08 12 +248 val_248 2008-04-08 12 +479 val_479 2008-04-08 12 +305 val_305 2008-04-08 12 +417 val_417 2008-04-08 12 +199 val_199 2008-04-08 12 +444 val_444 2008-04-08 12 +120 val_120 2008-04-08 12 +429 val_429 2008-04-08 12 +169 val_169 2008-04-08 12 +443 val_443 2008-04-08 12 +323 val_323 2008-04-08 12 +325 val_325 2008-04-08 12 +277 val_277 2008-04-08 12 +230 val_230 2008-04-08 12 +478 val_478 2008-04-08 12 +178 val_178 2008-04-08 12 +468 val_468 2008-04-08 12 +310 val_310 2008-04-08 12 +317 val_317 2008-04-08 12 +333 val_333 2008-04-08 12 +493 val_493 2008-04-08 12 +460 val_460 2008-04-08 12 +207 val_207 2008-04-08 12 +249 val_249 2008-04-08 12 +265 val_265 2008-04-08 12 +480 val_480 2008-04-08 12 +83 val_83 2008-04-08 12 +136 val_136 2008-04-08 12 +353 val_353 2008-04-08 12 +172 val_172 2008-04-08 12 +214 val_214 2008-04-08 12 +462 val_462 2008-04-08 12 +233 val_233 2008-04-08 12 +406 val_406 2008-04-08 12 +133 val_133 2008-04-08 12 +175 val_175 2008-04-08 12 +189 val_189 2008-04-08 12 +454 val_454 2008-04-08 12 +375 val_375 2008-04-08 12 +401 val_401 2008-04-08 12 +421 val_421 2008-04-08 12 +407 val_407 2008-04-08 12 +384 val_384 2008-04-08 12 +256 val_256 2008-04-08 12 +26 val_26 2008-04-08 12 +134 val_134 2008-04-08 12 +67 val_67 2008-04-08 12 +384 val_384 2008-04-08 12 +379 val_379 2008-04-08 12 +18 val_18 2008-04-08 12 +462 val_462 2008-04-08 12 +492 val_492 2008-04-08 12 +100 val_100 2008-04-08 12 +298 val_298 2008-04-08 12 +9 val_9 2008-04-08 12 +341 val_341 2008-04-08 12 +498 val_498 2008-04-08 12 +146 val_146 2008-04-08 12 +458 val_458 2008-04-08 12 +362 val_362 2008-04-08 12 +186 val_186 2008-04-08 12 +285 val_285 2008-04-08 12 +348 val_348 2008-04-08 12 +167 val_167 2008-04-08 12 +18 val_18 2008-04-08 12 +273 val_273 2008-04-08 12 +183 val_183 2008-04-08 12 +281 val_281 2008-04-08 12 +344 val_344 2008-04-08 12 +97 val_97 2008-04-08 12 +469 val_469 2008-04-08 12 +315 val_315 2008-04-08 12 +84 val_84 2008-04-08 12 +28 val_28 2008-04-08 12 +37 val_37 2008-04-08 12 +448 val_448 2008-04-08 12 +152 val_152 2008-04-08 12 +348 val_348 2008-04-08 12 +307 val_307 2008-04-08 12 +194 val_194 2008-04-08 12 +414 val_414 2008-04-08 12 +477 val_477 2008-04-08 12 +222 val_222 2008-04-08 12 +126 val_126 2008-04-08 12 +90 val_90 2008-04-08 12 +169 val_169 2008-04-08 12 +403 val_403 2008-04-08 12 +400 val_400 2008-04-08 12 +200 val_200 2008-04-08 12 +97 val_97 2008-04-08 12 +PREHOOK: query: select * from nzhang_part2 where ds is not null and hr is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@nzhang_part2 +PREHOOK: Input: default@nzhang_part2@ds=2008-12-31/hr=11 +PREHOOK: Input: default@nzhang_part2@ds=2008-12-31/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select * from nzhang_part2 where ds is not null and hr is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@nzhang_part2 +POSTHOOK: Input: default@nzhang_part2@ds=2008-12-31/hr=11 +POSTHOOK: Input: default@nzhang_part2@ds=2008-12-31/hr=12 +#### A masked pattern was here #### +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key 
SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +238 val_238 2008-12-31 11 +86 val_86 2008-12-31 11 +311 val_311 2008-12-31 11 +27 val_27 2008-12-31 11 +165 val_165 2008-12-31 11 +409 val_409 2008-12-31 11 +255 val_255 2008-12-31 11 +278 val_278 2008-12-31 11 +98 val_98 2008-12-31 11 +484 val_484 2008-12-31 11 +265 val_265 2008-12-31 11 +193 val_193 2008-12-31 11 +401 val_401 2008-12-31 11 +150 val_150 2008-12-31 11 +273 val_273 2008-12-31 11 +224 val_224 2008-12-31 11 +369 val_369 2008-12-31 11 +66 val_66 2008-12-31 11 +128 val_128 2008-12-31 11 +213 val_213 2008-12-31 11 +146 val_146 2008-12-31 11 +406 val_406 2008-12-31 11 +429 val_429 2008-12-31 11 +374 val_374 2008-12-31 11 +152 val_152 2008-12-31 11 +469 val_469 2008-12-31 11 +145 val_145 2008-12-31 11 +495 val_495 2008-12-31 11 +37 val_37 2008-12-31 11 +327 val_327 2008-12-31 11 +281 val_281 2008-12-31 11 +277 val_277 2008-12-31 11 +209 val_209 2008-12-31 11 +15 val_15 2008-12-31 11 +82 val_82 2008-12-31 11 +403 val_403 2008-12-31 11 +166 val_166 2008-12-31 11 +417 val_417 2008-12-31 11 +430 val_430 2008-12-31 11 +252 val_252 2008-12-31 11 +292 val_292 2008-12-31 11 +219 val_219 2008-12-31 11 +287 val_287 2008-12-31 11 +153 val_153 2008-12-31 11 +193 val_193 2008-12-31 11 +338 val_338 2008-12-31 11 +446 val_446 2008-12-31 11 +459 val_459 2008-12-31 11 +394 val_394 2008-12-31 11 +237 val_237 2008-12-31 11 +482 val_482 2008-12-31 11 +174 val_174 2008-12-31 11 +413 val_413 2008-12-31 11 +494 val_494 2008-12-31 11 +207 val_207 2008-12-31 11 +199 val_199 2008-12-31 11 +466 val_466 2008-12-31 11 +208 val_208 2008-12-31 11 +174 val_174 2008-12-31 11 +399 val_399 2008-12-31 11 +396 val_396 2008-12-31 11 +247 val_247 2008-12-31 11 +417 val_417 2008-12-31 11 +489 val_489 2008-12-31 11 +162 val_162 2008-12-31 11 +377 val_377 2008-12-31 11 +397 val_397 2008-12-31 11 +309 val_309 2008-12-31 11 +365 val_365 2008-12-31 11 +266 val_266 2008-12-31 11 +439 val_439 2008-12-31 11 +342 val_342 2008-12-31 11 +367 val_367 2008-12-31 11 +325 val_325 2008-12-31 11 +167 val_167 2008-12-31 11 +195 val_195 2008-12-31 11 +475 val_475 2008-12-31 11 +17 val_17 2008-12-31 11 +113 val_113 2008-12-31 11 +155 val_155 2008-12-31 11 +203 val_203 2008-12-31 11 +339 val_339 2008-12-31 11 +0 val_0 2008-12-31 11 +455 val_455 2008-12-31 11 +128 val_128 2008-12-31 11 +311 val_311 2008-12-31 11 +316 val_316 2008-12-31 11 +57 val_57 2008-12-31 11 +302 val_302 2008-12-31 11 +205 val_205 2008-12-31 11 +149 val_149 
2008-12-31 11 +438 val_438 2008-12-31 11 +345 val_345 2008-12-31 11 +129 val_129 2008-12-31 11 +170 val_170 2008-12-31 11 +20 val_20 2008-12-31 11 +489 val_489 2008-12-31 11 +157 val_157 2008-12-31 11 +378 val_378 2008-12-31 11 +221 val_221 2008-12-31 11 +92 val_92 2008-12-31 11 +111 val_111 2008-12-31 11 +47 val_47 2008-12-31 11 +72 val_72 2008-12-31 11 +4 val_4 2008-12-31 11 +280 val_280 2008-12-31 11 +35 val_35 2008-12-31 11 +427 val_427 2008-12-31 11 +277 val_277 2008-12-31 11 +208 val_208 2008-12-31 11 +356 val_356 2008-12-31 11 +399 val_399 2008-12-31 11 +169 val_169 2008-12-31 11 +382 val_382 2008-12-31 11 +498 val_498 2008-12-31 11 +125 val_125 2008-12-31 11 +386 val_386 2008-12-31 11 +437 val_437 2008-12-31 11 +469 val_469 2008-12-31 11 +192 val_192 2008-12-31 11 +286 val_286 2008-12-31 11 +187 val_187 2008-12-31 11 +176 val_176 2008-12-31 11 +54 val_54 2008-12-31 11 +459 val_459 2008-12-31 11 +51 val_51 2008-12-31 11 +138 val_138 2008-12-31 11 +103 val_103 2008-12-31 11 +239 val_239 2008-12-31 11 +213 val_213 2008-12-31 11 +216 val_216 2008-12-31 11 +430 val_430 2008-12-31 11 +278 val_278 2008-12-31 11 +176 val_176 2008-12-31 11 +289 val_289 2008-12-31 11 +221 val_221 2008-12-31 11 +65 val_65 2008-12-31 11 +318 val_318 2008-12-31 11 +332 val_332 2008-12-31 11 +311 val_311 2008-12-31 11 +275 val_275 2008-12-31 11 +137 val_137 2008-12-31 11 +241 val_241 2008-12-31 11 +83 val_83 2008-12-31 11 +333 val_333 2008-12-31 11 +180 val_180 2008-12-31 11 +284 val_284 2008-12-31 11 +12 val_12 2008-12-31 11 +230 val_230 2008-12-31 11 +181 val_181 2008-12-31 11 +67 val_67 2008-12-31 11 +260 val_260 2008-12-31 11 +404 val_404 2008-12-31 11 +384 val_384 2008-12-31 11 +489 val_489 2008-12-31 11 +353 val_353 2008-12-31 11 +373 val_373 2008-12-31 11 +272 val_272 2008-12-31 11 +138 val_138 2008-12-31 11 +217 val_217 2008-12-31 11 +84 val_84 2008-12-31 11 +348 val_348 2008-12-31 11 +466 val_466 2008-12-31 11 +58 val_58 2008-12-31 11 +8 val_8 2008-12-31 11 +411 val_411 2008-12-31 11 +230 val_230 2008-12-31 11 +208 val_208 2008-12-31 11 +348 val_348 2008-12-31 11 +24 val_24 2008-12-31 11 +463 val_463 2008-12-31 11 +431 val_431 2008-12-31 11 +179 val_179 2008-12-31 11 +172 val_172 2008-12-31 11 +42 val_42 2008-12-31 11 +129 val_129 2008-12-31 11 +158 val_158 2008-12-31 11 +119 val_119 2008-12-31 11 +496 val_496 2008-12-31 11 +0 val_0 2008-12-31 11 +322 val_322 2008-12-31 11 +197 val_197 2008-12-31 11 +468 val_468 2008-12-31 11 +393 val_393 2008-12-31 11 +454 val_454 2008-12-31 11 +100 val_100 2008-12-31 11 +298 val_298 2008-12-31 11 +199 val_199 2008-12-31 11 +191 val_191 2008-12-31 11 +418 val_418 2008-12-31 11 +96 val_96 2008-12-31 11 +26 val_26 2008-12-31 11 +165 val_165 2008-12-31 11 +327 val_327 2008-12-31 11 +230 val_230 2008-12-31 11 +205 val_205 2008-12-31 11 +120 val_120 2008-12-31 11 +131 val_131 2008-12-31 11 +51 val_51 2008-12-31 11 +404 val_404 2008-12-31 11 +43 val_43 2008-12-31 11 +436 val_436 2008-12-31 11 +156 val_156 2008-12-31 11 +469 val_469 2008-12-31 11 +468 val_468 2008-12-31 11 +308 val_308 2008-12-31 11 +95 val_95 2008-12-31 11 +196 val_196 2008-12-31 11 +288 val_288 2008-12-31 11 +481 val_481 2008-12-31 11 +457 val_457 2008-12-31 11 +98 val_98 2008-12-31 11 +282 val_282 2008-12-31 11 +197 val_197 2008-12-31 11 +187 val_187 2008-12-31 11 +318 val_318 2008-12-31 11 +318 val_318 2008-12-31 11 +409 val_409 2008-12-31 11 +470 val_470 2008-12-31 11 +137 val_137 2008-12-31 11 +369 val_369 2008-12-31 11 +316 val_316 2008-12-31 11 +169 val_169 2008-12-31 11 +413 val_413 2008-12-31 11 +85 
val_85 2008-12-31 11 +77 val_77 2008-12-31 11 +0 val_0 2008-12-31 11 +490 val_490 2008-12-31 11 +87 val_87 2008-12-31 11 +364 val_364 2008-12-31 11 +179 val_179 2008-12-31 11 +118 val_118 2008-12-31 11 +134 val_134 2008-12-31 11 +395 val_395 2008-12-31 11 +282 val_282 2008-12-31 11 +138 val_138 2008-12-31 11 +238 val_238 2008-12-31 11 +419 val_419 2008-12-31 11 +15 val_15 2008-12-31 11 +118 val_118 2008-12-31 11 +72 val_72 2008-12-31 11 +90 val_90 2008-12-31 11 +307 val_307 2008-12-31 11 +19 val_19 2008-12-31 11 +435 val_435 2008-12-31 11 +10 val_10 2008-12-31 11 +277 val_277 2008-12-31 11 +273 val_273 2008-12-31 11 +306 val_306 2008-12-31 11 +224 val_224 2008-12-31 11 +309 val_309 2008-12-31 11 +389 val_389 2008-12-31 11 +327 val_327 2008-12-31 11 +242 val_242 2008-12-31 11 +369 val_369 2008-12-31 11 +392 val_392 2008-12-31 11 +272 val_272 2008-12-31 11 +331 val_331 2008-12-31 11 +401 val_401 2008-12-31 11 +242 val_242 2008-12-31 11 +452 val_452 2008-12-31 11 +177 val_177 2008-12-31 11 +226 val_226 2008-12-31 11 +5 val_5 2008-12-31 11 +497 val_497 2008-12-31 11 +402 val_402 2008-12-31 11 +396 val_396 2008-12-31 11 +317 val_317 2008-12-31 11 +395 val_395 2008-12-31 11 +58 val_58 2008-12-31 11 +35 val_35 2008-12-31 11 +336 val_336 2008-12-31 11 +95 val_95 2008-12-31 11 +11 val_11 2008-12-31 11 +168 val_168 2008-12-31 11 +34 val_34 2008-12-31 11 +229 val_229 2008-12-31 11 +233 val_233 2008-12-31 11 +143 val_143 2008-12-31 11 +472 val_472 2008-12-31 11 +322 val_322 2008-12-31 11 +498 val_498 2008-12-31 11 +160 val_160 2008-12-31 11 +195 val_195 2008-12-31 11 +42 val_42 2008-12-31 11 +321 val_321 2008-12-31 11 +430 val_430 2008-12-31 11 +119 val_119 2008-12-31 11 +489 val_489 2008-12-31 11 +458 val_458 2008-12-31 11 +78 val_78 2008-12-31 11 +76 val_76 2008-12-31 11 +41 val_41 2008-12-31 11 +223 val_223 2008-12-31 11 +492 val_492 2008-12-31 11 +149 val_149 2008-12-31 11 +449 val_449 2008-12-31 11 +218 val_218 2008-12-31 11 +228 val_228 2008-12-31 11 +138 val_138 2008-12-31 11 +453 val_453 2008-12-31 11 +30 val_30 2008-12-31 11 +209 val_209 2008-12-31 11 +64 val_64 2008-12-31 11 +468 val_468 2008-12-31 11 +76 val_76 2008-12-31 11 +74 val_74 2008-12-31 11 +342 val_342 2008-12-31 11 +69 val_69 2008-12-31 11 +230 val_230 2008-12-31 11 +33 val_33 2008-12-31 11 +368 val_368 2008-12-31 11 +103 val_103 2008-12-31 11 +296 val_296 2008-12-31 11 +113 val_113 2008-12-31 11 +216 val_216 2008-12-31 11 +367 val_367 2008-12-31 11 +344 val_344 2008-12-31 11 +167 val_167 2008-12-31 11 +274 val_274 2008-12-31 11 +219 val_219 2008-12-31 11 +239 val_239 2008-12-31 11 +485 val_485 2008-12-31 11 +116 val_116 2008-12-31 11 +223 val_223 2008-12-31 11 +256 val_256 2008-12-31 11 +263 val_263 2008-12-31 11 +70 val_70 2008-12-31 11 +487 val_487 2008-12-31 11 +480 val_480 2008-12-31 11 +401 val_401 2008-12-31 11 +288 val_288 2008-12-31 11 +191 val_191 2008-12-31 11 +5 val_5 2008-12-31 11 +244 val_244 2008-12-31 11 +438 val_438 2008-12-31 11 +128 val_128 2008-12-31 11 +467 val_467 2008-12-31 11 +432 val_432 2008-12-31 11 +202 val_202 2008-12-31 11 +316 val_316 2008-12-31 11 +229 val_229 2008-12-31 11 +469 val_469 2008-12-31 11 +463 val_463 2008-12-31 11 +280 val_280 2008-12-31 11 +2 val_2 2008-12-31 11 +35 val_35 2008-12-31 11 +283 val_283 2008-12-31 11 +331 val_331 2008-12-31 11 +235 val_235 2008-12-31 11 +80 val_80 2008-12-31 11 +44 val_44 2008-12-31 11 +193 val_193 2008-12-31 11 +321 val_321 2008-12-31 11 +335 val_335 2008-12-31 11 +104 val_104 2008-12-31 11 +466 val_466 2008-12-31 11 +366 val_366 2008-12-31 11 +175 
val_175 2008-12-31 11 +403 val_403 2008-12-31 11 +483 val_483 2008-12-31 11 +53 val_53 2008-12-31 11 +105 val_105 2008-12-31 11 +257 val_257 2008-12-31 11 +406 val_406 2008-12-31 11 +409 val_409 2008-12-31 11 +190 val_190 2008-12-31 11 +406 val_406 2008-12-31 11 +401 val_401 2008-12-31 11 +114 val_114 2008-12-31 11 +258 val_258 2008-12-31 11 +90 val_90 2008-12-31 11 +203 val_203 2008-12-31 11 +262 val_262 2008-12-31 11 +348 val_348 2008-12-31 11 +424 val_424 2008-12-31 11 +12 val_12 2008-12-31 11 +396 val_396 2008-12-31 11 +201 val_201 2008-12-31 11 +217 val_217 2008-12-31 11 +164 val_164 2008-12-31 11 +431 val_431 2008-12-31 11 +454 val_454 2008-12-31 11 +478 val_478 2008-12-31 11 +298 val_298 2008-12-31 11 +125 val_125 2008-12-31 11 +431 val_431 2008-12-31 11 +164 val_164 2008-12-31 11 +424 val_424 2008-12-31 11 +187 val_187 2008-12-31 11 +382 val_382 2008-12-31 11 +5 val_5 2008-12-31 11 +70 val_70 2008-12-31 11 +397 val_397 2008-12-31 11 +480 val_480 2008-12-31 11 +291 val_291 2008-12-31 11 +24 val_24 2008-12-31 11 +351 val_351 2008-12-31 11 +255 val_255 2008-12-31 11 +104 val_104 2008-12-31 11 +70 val_70 2008-12-31 11 +163 val_163 2008-12-31 11 +438 val_438 2008-12-31 11 +119 val_119 2008-12-31 11 +414 val_414 2008-12-31 11 +200 val_200 2008-12-31 11 +491 val_491 2008-12-31 11 +237 val_237 2008-12-31 11 +439 val_439 2008-12-31 11 +360 val_360 2008-12-31 11 +248 val_248 2008-12-31 11 +479 val_479 2008-12-31 11 +305 val_305 2008-12-31 11 +417 val_417 2008-12-31 11 +199 val_199 2008-12-31 11 +444 val_444 2008-12-31 11 +120 val_120 2008-12-31 11 +429 val_429 2008-12-31 11 +169 val_169 2008-12-31 11 +443 val_443 2008-12-31 11 +323 val_323 2008-12-31 11 +325 val_325 2008-12-31 11 +277 val_277 2008-12-31 11 +230 val_230 2008-12-31 11 +478 val_478 2008-12-31 11 +178 val_178 2008-12-31 11 +468 val_468 2008-12-31 11 +310 val_310 2008-12-31 11 +317 val_317 2008-12-31 11 +333 val_333 2008-12-31 11 +493 val_493 2008-12-31 11 +460 val_460 2008-12-31 11 +207 val_207 2008-12-31 11 +249 val_249 2008-12-31 11 +265 val_265 2008-12-31 11 +480 val_480 2008-12-31 11 +83 val_83 2008-12-31 11 +136 val_136 2008-12-31 11 +353 val_353 2008-12-31 11 +172 val_172 2008-12-31 11 +214 val_214 2008-12-31 11 +462 val_462 2008-12-31 11 +233 val_233 2008-12-31 11 +406 val_406 2008-12-31 11 +133 val_133 2008-12-31 11 +175 val_175 2008-12-31 11 +189 val_189 2008-12-31 11 +454 val_454 2008-12-31 11 +375 val_375 2008-12-31 11 +401 val_401 2008-12-31 11 +421 val_421 2008-12-31 11 +407 val_407 2008-12-31 11 +384 val_384 2008-12-31 11 +256 val_256 2008-12-31 11 +26 val_26 2008-12-31 11 +134 val_134 2008-12-31 11 +67 val_67 2008-12-31 11 +384 val_384 2008-12-31 11 +379 val_379 2008-12-31 11 +18 val_18 2008-12-31 11 +462 val_462 2008-12-31 11 +492 val_492 2008-12-31 11 +100 val_100 2008-12-31 11 +298 val_298 2008-12-31 11 +9 val_9 2008-12-31 11 +341 val_341 2008-12-31 11 +498 val_498 2008-12-31 11 +146 val_146 2008-12-31 11 +458 val_458 2008-12-31 11 +362 val_362 2008-12-31 11 +186 val_186 2008-12-31 11 +285 val_285 2008-12-31 11 +348 val_348 2008-12-31 11 +167 val_167 2008-12-31 11 +18 val_18 2008-12-31 11 +273 val_273 2008-12-31 11 +183 val_183 2008-12-31 11 +281 val_281 2008-12-31 11 +344 val_344 2008-12-31 11 +97 val_97 2008-12-31 11 +469 val_469 2008-12-31 11 +315 val_315 2008-12-31 11 +84 val_84 2008-12-31 11 +28 val_28 2008-12-31 11 +37 val_37 2008-12-31 11 +448 val_448 2008-12-31 11 +152 val_152 2008-12-31 11 +348 val_348 2008-12-31 11 +307 val_307 2008-12-31 11 +194 val_194 2008-12-31 11 +414 val_414 2008-12-31 11 +477 
val_477 2008-12-31 11 +222 val_222 2008-12-31 11 +126 val_126 2008-12-31 11 +90 val_90 2008-12-31 11 +169 val_169 2008-12-31 11 +403 val_403 2008-12-31 11 +400 val_400 2008-12-31 11 +200 val_200 2008-12-31 11 +97 val_97 2008-12-31 11 +238 val_238 2008-12-31 12 +86 val_86 2008-12-31 12 +311 val_311 2008-12-31 12 +27 val_27 2008-12-31 12 +165 val_165 2008-12-31 12 +409 val_409 2008-12-31 12 +255 val_255 2008-12-31 12 +278 val_278 2008-12-31 12 +98 val_98 2008-12-31 12 +484 val_484 2008-12-31 12 +265 val_265 2008-12-31 12 +193 val_193 2008-12-31 12 +401 val_401 2008-12-31 12 +150 val_150 2008-12-31 12 +273 val_273 2008-12-31 12 +224 val_224 2008-12-31 12 +369 val_369 2008-12-31 12 +66 val_66 2008-12-31 12 +128 val_128 2008-12-31 12 +213 val_213 2008-12-31 12 +146 val_146 2008-12-31 12 +406 val_406 2008-12-31 12 +429 val_429 2008-12-31 12 +374 val_374 2008-12-31 12 +152 val_152 2008-12-31 12 +469 val_469 2008-12-31 12 +145 val_145 2008-12-31 12 +495 val_495 2008-12-31 12 +37 val_37 2008-12-31 12 +327 val_327 2008-12-31 12 +281 val_281 2008-12-31 12 +277 val_277 2008-12-31 12 +209 val_209 2008-12-31 12 +15 val_15 2008-12-31 12 +82 val_82 2008-12-31 12 +403 val_403 2008-12-31 12 +166 val_166 2008-12-31 12 +417 val_417 2008-12-31 12 +430 val_430 2008-12-31 12 +252 val_252 2008-12-31 12 +292 val_292 2008-12-31 12 +219 val_219 2008-12-31 12 +287 val_287 2008-12-31 12 +153 val_153 2008-12-31 12 +193 val_193 2008-12-31 12 +338 val_338 2008-12-31 12 +446 val_446 2008-12-31 12 +459 val_459 2008-12-31 12 +394 val_394 2008-12-31 12 +237 val_237 2008-12-31 12 +482 val_482 2008-12-31 12 +174 val_174 2008-12-31 12 +413 val_413 2008-12-31 12 +494 val_494 2008-12-31 12 +207 val_207 2008-12-31 12 +199 val_199 2008-12-31 12 +466 val_466 2008-12-31 12 +208 val_208 2008-12-31 12 +174 val_174 2008-12-31 12 +399 val_399 2008-12-31 12 +396 val_396 2008-12-31 12 +247 val_247 2008-12-31 12 +417 val_417 2008-12-31 12 +489 val_489 2008-12-31 12 +162 val_162 2008-12-31 12 +377 val_377 2008-12-31 12 +397 val_397 2008-12-31 12 +309 val_309 2008-12-31 12 +365 val_365 2008-12-31 12 +266 val_266 2008-12-31 12 +439 val_439 2008-12-31 12 +342 val_342 2008-12-31 12 +367 val_367 2008-12-31 12 +325 val_325 2008-12-31 12 +167 val_167 2008-12-31 12 +195 val_195 2008-12-31 12 +475 val_475 2008-12-31 12 +17 val_17 2008-12-31 12 +113 val_113 2008-12-31 12 +155 val_155 2008-12-31 12 +203 val_203 2008-12-31 12 +339 val_339 2008-12-31 12 +0 val_0 2008-12-31 12 +455 val_455 2008-12-31 12 +128 val_128 2008-12-31 12 +311 val_311 2008-12-31 12 +316 val_316 2008-12-31 12 +57 val_57 2008-12-31 12 +302 val_302 2008-12-31 12 +205 val_205 2008-12-31 12 +149 val_149 2008-12-31 12 +438 val_438 2008-12-31 12 +345 val_345 2008-12-31 12 +129 val_129 2008-12-31 12 +170 val_170 2008-12-31 12 +20 val_20 2008-12-31 12 +489 val_489 2008-12-31 12 +157 val_157 2008-12-31 12 +378 val_378 2008-12-31 12 +221 val_221 2008-12-31 12 +92 val_92 2008-12-31 12 +111 val_111 2008-12-31 12 +47 val_47 2008-12-31 12 +72 val_72 2008-12-31 12 +4 val_4 2008-12-31 12 +280 val_280 2008-12-31 12 +35 val_35 2008-12-31 12 +427 val_427 2008-12-31 12 +277 val_277 2008-12-31 12 +208 val_208 2008-12-31 12 +356 val_356 2008-12-31 12 +399 val_399 2008-12-31 12 +169 val_169 2008-12-31 12 +382 val_382 2008-12-31 12 +498 val_498 2008-12-31 12 +125 val_125 2008-12-31 12 +386 val_386 2008-12-31 12 +437 val_437 2008-12-31 12 +469 val_469 2008-12-31 12 +192 val_192 2008-12-31 12 +286 val_286 2008-12-31 12 +187 val_187 2008-12-31 12 +176 val_176 2008-12-31 12 +54 val_54 2008-12-31 12 +459 
val_459 2008-12-31 12 +51 val_51 2008-12-31 12 +138 val_138 2008-12-31 12 +103 val_103 2008-12-31 12 +239 val_239 2008-12-31 12 +213 val_213 2008-12-31 12 +216 val_216 2008-12-31 12 +430 val_430 2008-12-31 12 +278 val_278 2008-12-31 12 +176 val_176 2008-12-31 12 +289 val_289 2008-12-31 12 +221 val_221 2008-12-31 12 +65 val_65 2008-12-31 12 +318 val_318 2008-12-31 12 +332 val_332 2008-12-31 12 +311 val_311 2008-12-31 12 +275 val_275 2008-12-31 12 +137 val_137 2008-12-31 12 +241 val_241 2008-12-31 12 +83 val_83 2008-12-31 12 +333 val_333 2008-12-31 12 +180 val_180 2008-12-31 12 +284 val_284 2008-12-31 12 +12 val_12 2008-12-31 12 +230 val_230 2008-12-31 12 +181 val_181 2008-12-31 12 +67 val_67 2008-12-31 12 +260 val_260 2008-12-31 12 +404 val_404 2008-12-31 12 +384 val_384 2008-12-31 12 +489 val_489 2008-12-31 12 +353 val_353 2008-12-31 12 +373 val_373 2008-12-31 12 +272 val_272 2008-12-31 12 +138 val_138 2008-12-31 12 +217 val_217 2008-12-31 12 +84 val_84 2008-12-31 12 +348 val_348 2008-12-31 12 +466 val_466 2008-12-31 12 +58 val_58 2008-12-31 12 +8 val_8 2008-12-31 12 +411 val_411 2008-12-31 12 +230 val_230 2008-12-31 12 +208 val_208 2008-12-31 12 +348 val_348 2008-12-31 12 +24 val_24 2008-12-31 12 +463 val_463 2008-12-31 12 +431 val_431 2008-12-31 12 +179 val_179 2008-12-31 12 +172 val_172 2008-12-31 12 +42 val_42 2008-12-31 12 +129 val_129 2008-12-31 12 +158 val_158 2008-12-31 12 +119 val_119 2008-12-31 12 +496 val_496 2008-12-31 12 +0 val_0 2008-12-31 12 +322 val_322 2008-12-31 12 +197 val_197 2008-12-31 12 +468 val_468 2008-12-31 12 +393 val_393 2008-12-31 12 +454 val_454 2008-12-31 12 +100 val_100 2008-12-31 12 +298 val_298 2008-12-31 12 +199 val_199 2008-12-31 12 +191 val_191 2008-12-31 12 +418 val_418 2008-12-31 12 +96 val_96 2008-12-31 12 +26 val_26 2008-12-31 12 +165 val_165 2008-12-31 12 +327 val_327 2008-12-31 12 +230 val_230 2008-12-31 12 +205 val_205 2008-12-31 12 +120 val_120 2008-12-31 12 +131 val_131 2008-12-31 12 +51 val_51 2008-12-31 12 +404 val_404 2008-12-31 12 +43 val_43 2008-12-31 12 +436 val_436 2008-12-31 12 +156 val_156 2008-12-31 12 +469 val_469 2008-12-31 12 +468 val_468 2008-12-31 12 +308 val_308 2008-12-31 12 +95 val_95 2008-12-31 12 +196 val_196 2008-12-31 12 +288 val_288 2008-12-31 12 +481 val_481 2008-12-31 12 +457 val_457 2008-12-31 12 +98 val_98 2008-12-31 12 +282 val_282 2008-12-31 12 +197 val_197 2008-12-31 12 +187 val_187 2008-12-31 12 +318 val_318 2008-12-31 12 +318 val_318 2008-12-31 12 +409 val_409 2008-12-31 12 +470 val_470 2008-12-31 12 +137 val_137 2008-12-31 12 +369 val_369 2008-12-31 12 +316 val_316 2008-12-31 12 +169 val_169 2008-12-31 12 +413 val_413 2008-12-31 12 +85 val_85 2008-12-31 12 +77 val_77 2008-12-31 12 +0 val_0 2008-12-31 12 +490 val_490 2008-12-31 12 +87 val_87 2008-12-31 12 +364 val_364 2008-12-31 12 +179 val_179 2008-12-31 12 +118 val_118 2008-12-31 12 +134 val_134 2008-12-31 12 +395 val_395 2008-12-31 12 +282 val_282 2008-12-31 12 +138 val_138 2008-12-31 12 +238 val_238 2008-12-31 12 +419 val_419 2008-12-31 12 +15 val_15 2008-12-31 12 +118 val_118 2008-12-31 12 +72 val_72 2008-12-31 12 +90 val_90 2008-12-31 12 +307 val_307 2008-12-31 12 +19 val_19 2008-12-31 12 +435 val_435 2008-12-31 12 +10 val_10 2008-12-31 12 +277 val_277 2008-12-31 12 +273 val_273 2008-12-31 12 +306 val_306 2008-12-31 12 +224 val_224 2008-12-31 12 +309 val_309 2008-12-31 12 +389 val_389 2008-12-31 12 +327 val_327 2008-12-31 12 +242 val_242 2008-12-31 12 +369 val_369 2008-12-31 12 +392 val_392 2008-12-31 12 +272 val_272 2008-12-31 12 +331 val_331 2008-12-31 12 
+401 val_401 2008-12-31 12 +242 val_242 2008-12-31 12 +452 val_452 2008-12-31 12 +177 val_177 2008-12-31 12 +226 val_226 2008-12-31 12 +5 val_5 2008-12-31 12 +497 val_497 2008-12-31 12 +402 val_402 2008-12-31 12 +396 val_396 2008-12-31 12 +317 val_317 2008-12-31 12 +395 val_395 2008-12-31 12 +58 val_58 2008-12-31 12 +35 val_35 2008-12-31 12 +336 val_336 2008-12-31 12 +95 val_95 2008-12-31 12 +11 val_11 2008-12-31 12 +168 val_168 2008-12-31 12 +34 val_34 2008-12-31 12 +229 val_229 2008-12-31 12 +233 val_233 2008-12-31 12 +143 val_143 2008-12-31 12 +472 val_472 2008-12-31 12 +322 val_322 2008-12-31 12 +498 val_498 2008-12-31 12 +160 val_160 2008-12-31 12 +195 val_195 2008-12-31 12 +42 val_42 2008-12-31 12 +321 val_321 2008-12-31 12 +430 val_430 2008-12-31 12 +119 val_119 2008-12-31 12 +489 val_489 2008-12-31 12 +458 val_458 2008-12-31 12 +78 val_78 2008-12-31 12 +76 val_76 2008-12-31 12 +41 val_41 2008-12-31 12 +223 val_223 2008-12-31 12 +492 val_492 2008-12-31 12 +149 val_149 2008-12-31 12 +449 val_449 2008-12-31 12 +218 val_218 2008-12-31 12 +228 val_228 2008-12-31 12 +138 val_138 2008-12-31 12 +453 val_453 2008-12-31 12 +30 val_30 2008-12-31 12 +209 val_209 2008-12-31 12 +64 val_64 2008-12-31 12 +468 val_468 2008-12-31 12 +76 val_76 2008-12-31 12 +74 val_74 2008-12-31 12 +342 val_342 2008-12-31 12 +69 val_69 2008-12-31 12 +230 val_230 2008-12-31 12 +33 val_33 2008-12-31 12 +368 val_368 2008-12-31 12 +103 val_103 2008-12-31 12 +296 val_296 2008-12-31 12 +113 val_113 2008-12-31 12 +216 val_216 2008-12-31 12 +367 val_367 2008-12-31 12 +344 val_344 2008-12-31 12 +167 val_167 2008-12-31 12 +274 val_274 2008-12-31 12 +219 val_219 2008-12-31 12 +239 val_239 2008-12-31 12 +485 val_485 2008-12-31 12 +116 val_116 2008-12-31 12 +223 val_223 2008-12-31 12 +256 val_256 2008-12-31 12 +263 val_263 2008-12-31 12 +70 val_70 2008-12-31 12 +487 val_487 2008-12-31 12 +480 val_480 2008-12-31 12 +401 val_401 2008-12-31 12 +288 val_288 2008-12-31 12 +191 val_191 2008-12-31 12 +5 val_5 2008-12-31 12 +244 val_244 2008-12-31 12 +438 val_438 2008-12-31 12 +128 val_128 2008-12-31 12 +467 val_467 2008-12-31 12 +432 val_432 2008-12-31 12 +202 val_202 2008-12-31 12 +316 val_316 2008-12-31 12 +229 val_229 2008-12-31 12 +469 val_469 2008-12-31 12 +463 val_463 2008-12-31 12 +280 val_280 2008-12-31 12 +2 val_2 2008-12-31 12 +35 val_35 2008-12-31 12 +283 val_283 2008-12-31 12 +331 val_331 2008-12-31 12 +235 val_235 2008-12-31 12 +80 val_80 2008-12-31 12 +44 val_44 2008-12-31 12 +193 val_193 2008-12-31 12 +321 val_321 2008-12-31 12 +335 val_335 2008-12-31 12 +104 val_104 2008-12-31 12 +466 val_466 2008-12-31 12 +366 val_366 2008-12-31 12 +175 val_175 2008-12-31 12 +403 val_403 2008-12-31 12 +483 val_483 2008-12-31 12 +53 val_53 2008-12-31 12 +105 val_105 2008-12-31 12 +257 val_257 2008-12-31 12 +406 val_406 2008-12-31 12 +409 val_409 2008-12-31 12 +190 val_190 2008-12-31 12 +406 val_406 2008-12-31 12 +401 val_401 2008-12-31 12 +114 val_114 2008-12-31 12 +258 val_258 2008-12-31 12 +90 val_90 2008-12-31 12 +203 val_203 2008-12-31 12 +262 val_262 2008-12-31 12 +348 val_348 2008-12-31 12 +424 val_424 2008-12-31 12 +12 val_12 2008-12-31 12 +396 val_396 2008-12-31 12 +201 val_201 2008-12-31 12 +217 val_217 2008-12-31 12 +164 val_164 2008-12-31 12 +431 val_431 2008-12-31 12 +454 val_454 2008-12-31 12 +478 val_478 2008-12-31 12 +298 val_298 2008-12-31 12 +125 val_125 2008-12-31 12 +431 val_431 2008-12-31 12 +164 val_164 2008-12-31 12 +424 val_424 2008-12-31 12 +187 val_187 2008-12-31 12 +382 val_382 2008-12-31 12 +5 val_5 2008-12-31 
12 +70 val_70 2008-12-31 12 +397 val_397 2008-12-31 12 +480 val_480 2008-12-31 12 +291 val_291 2008-12-31 12 +24 val_24 2008-12-31 12 +351 val_351 2008-12-31 12 +255 val_255 2008-12-31 12 +104 val_104 2008-12-31 12 +70 val_70 2008-12-31 12 +163 val_163 2008-12-31 12 +438 val_438 2008-12-31 12 +119 val_119 2008-12-31 12 +414 val_414 2008-12-31 12 +200 val_200 2008-12-31 12 +491 val_491 2008-12-31 12 +237 val_237 2008-12-31 12 +439 val_439 2008-12-31 12 +360 val_360 2008-12-31 12 +248 val_248 2008-12-31 12 +479 val_479 2008-12-31 12 +305 val_305 2008-12-31 12 +417 val_417 2008-12-31 12 +199 val_199 2008-12-31 12 +444 val_444 2008-12-31 12 +120 val_120 2008-12-31 12 +429 val_429 2008-12-31 12 +169 val_169 2008-12-31 12 +443 val_443 2008-12-31 12 +323 val_323 2008-12-31 12 +325 val_325 2008-12-31 12 +277 val_277 2008-12-31 12 +230 val_230 2008-12-31 12 +478 val_478 2008-12-31 12 +178 val_178 2008-12-31 12 +468 val_468 2008-12-31 12 +310 val_310 2008-12-31 12 +317 val_317 2008-12-31 12 +333 val_333 2008-12-31 12 +493 val_493 2008-12-31 12 +460 val_460 2008-12-31 12 +207 val_207 2008-12-31 12 +249 val_249 2008-12-31 12 +265 val_265 2008-12-31 12 +480 val_480 2008-12-31 12 +83 val_83 2008-12-31 12 +136 val_136 2008-12-31 12 +353 val_353 2008-12-31 12 +172 val_172 2008-12-31 12 +214 val_214 2008-12-31 12 +462 val_462 2008-12-31 12 +233 val_233 2008-12-31 12 +406 val_406 2008-12-31 12 +133 val_133 2008-12-31 12 +175 val_175 2008-12-31 12 +189 val_189 2008-12-31 12 +454 val_454 2008-12-31 12 +375 val_375 2008-12-31 12 +401 val_401 2008-12-31 12 +421 val_421 2008-12-31 12 +407 val_407 2008-12-31 12 +384 val_384 2008-12-31 12 +256 val_256 2008-12-31 12 +26 val_26 2008-12-31 12 +134 val_134 2008-12-31 12 +67 val_67 2008-12-31 12 +384 val_384 2008-12-31 12 +379 val_379 2008-12-31 12 +18 val_18 2008-12-31 12 +462 val_462 2008-12-31 12 +492 val_492 2008-12-31 12 +100 val_100 2008-12-31 12 +298 val_298 2008-12-31 12 +9 val_9 2008-12-31 12 +341 val_341 2008-12-31 12 +498 val_498 2008-12-31 12 +146 val_146 2008-12-31 12 +458 val_458 2008-12-31 12 +362 val_362 2008-12-31 12 +186 val_186 2008-12-31 12 +285 val_285 2008-12-31 12 +348 val_348 2008-12-31 12 +167 val_167 2008-12-31 12 +18 val_18 2008-12-31 12 +273 val_273 2008-12-31 12 +183 val_183 2008-12-31 12 +281 val_281 2008-12-31 12 +344 val_344 2008-12-31 12 +97 val_97 2008-12-31 12 +469 val_469 2008-12-31 12 +315 val_315 2008-12-31 12 +84 val_84 2008-12-31 12 +28 val_28 2008-12-31 12 +37 val_37 2008-12-31 12 +448 val_448 2008-12-31 12 +152 val_152 2008-12-31 12 +348 val_348 2008-12-31 12 +307 val_307 2008-12-31 12 +194 val_194 2008-12-31 12 +414 val_414 2008-12-31 12 +477 val_477 2008-12-31 12 +222 val_222 2008-12-31 12 +126 val_126 2008-12-31 12 +90 val_90 2008-12-31 12 +169 val_169 2008-12-31 12 +403 val_403 2008-12-31 12 +400 val_400 2008-12-31 12 +200 val_200 2008-12-31 12 +97 val_97 2008-12-31 12 Index: ql/src/test/results/clientpositive/tez/load_dyn_part2.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/load_dyn_part2.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/load_dyn_part2.q.out (working copy) @@ -0,0 +1,2158 @@ +PREHOOK: query: create table if not exists nzhang_part_bucket (key string, value string) + partitioned by (ds string, hr string) + clustered by (key) into 10 buckets +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table if not exists nzhang_part_bucket (key string, value string) + partitioned by (ds string, hr string) + clustered by (key) into 10 
buckets +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@nzhang_part_bucket +PREHOOK: query: describe extended nzhang_part_bucket +PREHOOK: type: DESCTABLE +POSTHOOK: query: describe extended nzhang_part_bucket +POSTHOOK: type: DESCTABLE +key string None +value string None +ds string None +hr string None + +# Partition Information +# col_name data_type comment + +ds string None +hr string None + +#### A masked pattern was here #### +PREHOOK: query: explain +insert overwrite table nzhang_part_bucket partition (ds='2010-03-23', hr) select key, value, hr from srcpart where ds is not null and hr is not null +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table nzhang_part_bucket partition (ds='2010-03-23', hr) select key, value, hr from srcpart where ds is not null and hr is not null +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME nzhang_part_bucket) (TOK_PARTSPEC (TOK_PARTVAL ds '2010-03-23') (TOK_PARTVAL hr)))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value)) (TOK_SELEXPR (TOK_TABLE_OR_COL hr))) (TOK_WHERE (and (TOK_FUNCTION TOK_ISNOTNULL (TOK_TABLE_OR_COL ds)) (TOK_FUNCTION TOK_ISNOTNULL (TOK_TABLE_OR_COL hr)))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + srcpart + TableScan + alias: srcpart + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + expr: hr + type: string + outputColumnNames: _col0, _col1, _col2 + Reduce Output Operator + sort order: + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + expr: _col2 + type: string + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part_bucket + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 2010-03-23 + hr + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part_bucket + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: insert overwrite table nzhang_part_bucket partition (ds='2010-03-23', hr) select key, value, hr from srcpart where ds is not null and hr is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@nzhang_part_bucket@ds=2010-03-23 +POSTHOOK: query: insert overwrite table nzhang_part_bucket partition (ds='2010-03-23', hr) select key, value, hr from srcpart where ds is not null and hr is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: 
default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@nzhang_part_bucket@ds=2010-03-23/hr=11 +POSTHOOK: Output: default@nzhang_part_bucket@ds=2010-03-23/hr=12 +POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show partitions nzhang_part_bucket +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: show partitions nzhang_part_bucket +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +ds=2010-03-23/hr=11 +ds=2010-03-23/hr=12 +PREHOOK: query: select * from nzhang_part_bucket where ds='2010-03-23' and hr='11' order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@nzhang_part_bucket +PREHOOK: Input: default@nzhang_part_bucket@ds=2010-03-23/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select * from nzhang_part_bucket where ds='2010-03-23' and hr='11' order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@nzhang_part_bucket +POSTHOOK: Input: default@nzhang_part_bucket@ds=2010-03-23/hr=11 +#### A masked pattern was here #### +POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +0 val_0 2010-03-23 11 +0 val_0 2010-03-23 11 +0 val_0 2010-03-23 11 +0 val_0 2010-03-23 11 +0 val_0 2010-03-23 11 +0 val_0 2010-03-23 11 +10 val_10 2010-03-23 11 +10 val_10 2010-03-23 11 +100 val_100 2010-03-23 11 +100 val_100 2010-03-23 11 +100 val_100 2010-03-23 11 +100 val_100 2010-03-23 11 +103 val_103 2010-03-23 11 +103 val_103 2010-03-23 11 +103 val_103 2010-03-23 11 +103 val_103 2010-03-23 11 +104 val_104 2010-03-23 11 +104 val_104 2010-03-23 11 +104 val_104 2010-03-23 11 +104 val_104 2010-03-23 11 +105 val_105 2010-03-23 11 +105 val_105 2010-03-23 11 +11 val_11 2010-03-23 11 +11 val_11 2010-03-23 11 +111 val_111 2010-03-23 11 +111 val_111 2010-03-23 11 +113 val_113 2010-03-23 11 +113 val_113 
2010-03-23 11 +113 val_113 2010-03-23 11 +113 val_113 2010-03-23 11 +114 val_114 2010-03-23 11 +114 val_114 2010-03-23 11 +116 val_116 2010-03-23 11 +116 val_116 2010-03-23 11 +118 val_118 2010-03-23 11 +118 val_118 2010-03-23 11 +118 val_118 2010-03-23 11 +118 val_118 2010-03-23 11 +119 val_119 2010-03-23 11 +119 val_119 2010-03-23 11 +119 val_119 2010-03-23 11 +119 val_119 2010-03-23 11 +119 val_119 2010-03-23 11 +119 val_119 2010-03-23 11 +12 val_12 2010-03-23 11 +12 val_12 2010-03-23 11 +12 val_12 2010-03-23 11 +12 val_12 2010-03-23 11 +120 val_120 2010-03-23 11 +120 val_120 2010-03-23 11 +120 val_120 2010-03-23 11 +120 val_120 2010-03-23 11 +125 val_125 2010-03-23 11 +125 val_125 2010-03-23 11 +125 val_125 2010-03-23 11 +125 val_125 2010-03-23 11 +126 val_126 2010-03-23 11 +126 val_126 2010-03-23 11 +128 val_128 2010-03-23 11 +128 val_128 2010-03-23 11 +128 val_128 2010-03-23 11 +128 val_128 2010-03-23 11 +128 val_128 2010-03-23 11 +128 val_128 2010-03-23 11 +129 val_129 2010-03-23 11 +129 val_129 2010-03-23 11 +129 val_129 2010-03-23 11 +129 val_129 2010-03-23 11 +131 val_131 2010-03-23 11 +131 val_131 2010-03-23 11 +133 val_133 2010-03-23 11 +133 val_133 2010-03-23 11 +134 val_134 2010-03-23 11 +134 val_134 2010-03-23 11 +134 val_134 2010-03-23 11 +134 val_134 2010-03-23 11 +136 val_136 2010-03-23 11 +136 val_136 2010-03-23 11 +137 val_137 2010-03-23 11 +137 val_137 2010-03-23 11 +137 val_137 2010-03-23 11 +137 val_137 2010-03-23 11 +138 val_138 2010-03-23 11 +138 val_138 2010-03-23 11 +138 val_138 2010-03-23 11 +138 val_138 2010-03-23 11 +138 val_138 2010-03-23 11 +138 val_138 2010-03-23 11 +138 val_138 2010-03-23 11 +138 val_138 2010-03-23 11 +143 val_143 2010-03-23 11 +143 val_143 2010-03-23 11 +145 val_145 2010-03-23 11 +145 val_145 2010-03-23 11 +146 val_146 2010-03-23 11 +146 val_146 2010-03-23 11 +146 val_146 2010-03-23 11 +146 val_146 2010-03-23 11 +149 val_149 2010-03-23 11 +149 val_149 2010-03-23 11 +149 val_149 2010-03-23 11 +149 val_149 2010-03-23 11 +15 val_15 2010-03-23 11 +15 val_15 2010-03-23 11 +15 val_15 2010-03-23 11 +15 val_15 2010-03-23 11 +150 val_150 2010-03-23 11 +150 val_150 2010-03-23 11 +152 val_152 2010-03-23 11 +152 val_152 2010-03-23 11 +152 val_152 2010-03-23 11 +152 val_152 2010-03-23 11 +153 val_153 2010-03-23 11 +153 val_153 2010-03-23 11 +155 val_155 2010-03-23 11 +155 val_155 2010-03-23 11 +156 val_156 2010-03-23 11 +156 val_156 2010-03-23 11 +157 val_157 2010-03-23 11 +157 val_157 2010-03-23 11 +158 val_158 2010-03-23 11 +158 val_158 2010-03-23 11 +160 val_160 2010-03-23 11 +160 val_160 2010-03-23 11 +162 val_162 2010-03-23 11 +162 val_162 2010-03-23 11 +163 val_163 2010-03-23 11 +163 val_163 2010-03-23 11 +164 val_164 2010-03-23 11 +164 val_164 2010-03-23 11 +164 val_164 2010-03-23 11 +164 val_164 2010-03-23 11 +165 val_165 2010-03-23 11 +165 val_165 2010-03-23 11 +165 val_165 2010-03-23 11 +165 val_165 2010-03-23 11 +166 val_166 2010-03-23 11 +166 val_166 2010-03-23 11 +167 val_167 2010-03-23 11 +167 val_167 2010-03-23 11 +167 val_167 2010-03-23 11 +167 val_167 2010-03-23 11 +167 val_167 2010-03-23 11 +167 val_167 2010-03-23 11 +168 val_168 2010-03-23 11 +168 val_168 2010-03-23 11 +169 val_169 2010-03-23 11 +169 val_169 2010-03-23 11 +169 val_169 2010-03-23 11 +169 val_169 2010-03-23 11 +169 val_169 2010-03-23 11 +169 val_169 2010-03-23 11 +169 val_169 2010-03-23 11 +169 val_169 2010-03-23 11 +17 val_17 2010-03-23 11 +17 val_17 2010-03-23 11 +170 val_170 2010-03-23 11 +170 val_170 2010-03-23 11 +172 val_172 2010-03-23 11 +172 val_172 2010-03-23 
11 +172 val_172 2010-03-23 11 +172 val_172 2010-03-23 11 +174 val_174 2010-03-23 11 +174 val_174 2010-03-23 11 +174 val_174 2010-03-23 11 +174 val_174 2010-03-23 11 +175 val_175 2010-03-23 11 +175 val_175 2010-03-23 11 +175 val_175 2010-03-23 11 +175 val_175 2010-03-23 11 +176 val_176 2010-03-23 11 +176 val_176 2010-03-23 11 +176 val_176 2010-03-23 11 +176 val_176 2010-03-23 11 +177 val_177 2010-03-23 11 +177 val_177 2010-03-23 11 +178 val_178 2010-03-23 11 +178 val_178 2010-03-23 11 +179 val_179 2010-03-23 11 +179 val_179 2010-03-23 11 +179 val_179 2010-03-23 11 +179 val_179 2010-03-23 11 +18 val_18 2010-03-23 11 +18 val_18 2010-03-23 11 +18 val_18 2010-03-23 11 +18 val_18 2010-03-23 11 +180 val_180 2010-03-23 11 +180 val_180 2010-03-23 11 +181 val_181 2010-03-23 11 +181 val_181 2010-03-23 11 +183 val_183 2010-03-23 11 +183 val_183 2010-03-23 11 +186 val_186 2010-03-23 11 +186 val_186 2010-03-23 11 +187 val_187 2010-03-23 11 +187 val_187 2010-03-23 11 +187 val_187 2010-03-23 11 +187 val_187 2010-03-23 11 +187 val_187 2010-03-23 11 +187 val_187 2010-03-23 11 +189 val_189 2010-03-23 11 +189 val_189 2010-03-23 11 +19 val_19 2010-03-23 11 +19 val_19 2010-03-23 11 +190 val_190 2010-03-23 11 +190 val_190 2010-03-23 11 +191 val_191 2010-03-23 11 +191 val_191 2010-03-23 11 +191 val_191 2010-03-23 11 +191 val_191 2010-03-23 11 +192 val_192 2010-03-23 11 +192 val_192 2010-03-23 11 +193 val_193 2010-03-23 11 +193 val_193 2010-03-23 11 +193 val_193 2010-03-23 11 +193 val_193 2010-03-23 11 +193 val_193 2010-03-23 11 +193 val_193 2010-03-23 11 +194 val_194 2010-03-23 11 +194 val_194 2010-03-23 11 +195 val_195 2010-03-23 11 +195 val_195 2010-03-23 11 +195 val_195 2010-03-23 11 +195 val_195 2010-03-23 11 +196 val_196 2010-03-23 11 +196 val_196 2010-03-23 11 +197 val_197 2010-03-23 11 +197 val_197 2010-03-23 11 +197 val_197 2010-03-23 11 +197 val_197 2010-03-23 11 +199 val_199 2010-03-23 11 +199 val_199 2010-03-23 11 +199 val_199 2010-03-23 11 +199 val_199 2010-03-23 11 +199 val_199 2010-03-23 11 +199 val_199 2010-03-23 11 +2 val_2 2010-03-23 11 +2 val_2 2010-03-23 11 +20 val_20 2010-03-23 11 +20 val_20 2010-03-23 11 +200 val_200 2010-03-23 11 +200 val_200 2010-03-23 11 +200 val_200 2010-03-23 11 +200 val_200 2010-03-23 11 +201 val_201 2010-03-23 11 +201 val_201 2010-03-23 11 +202 val_202 2010-03-23 11 +202 val_202 2010-03-23 11 +203 val_203 2010-03-23 11 +203 val_203 2010-03-23 11 +203 val_203 2010-03-23 11 +203 val_203 2010-03-23 11 +205 val_205 2010-03-23 11 +205 val_205 2010-03-23 11 +205 val_205 2010-03-23 11 +205 val_205 2010-03-23 11 +207 val_207 2010-03-23 11 +207 val_207 2010-03-23 11 +207 val_207 2010-03-23 11 +207 val_207 2010-03-23 11 +208 val_208 2010-03-23 11 +208 val_208 2010-03-23 11 +208 val_208 2010-03-23 11 +208 val_208 2010-03-23 11 +208 val_208 2010-03-23 11 +208 val_208 2010-03-23 11 +209 val_209 2010-03-23 11 +209 val_209 2010-03-23 11 +209 val_209 2010-03-23 11 +209 val_209 2010-03-23 11 +213 val_213 2010-03-23 11 +213 val_213 2010-03-23 11 +213 val_213 2010-03-23 11 +213 val_213 2010-03-23 11 +214 val_214 2010-03-23 11 +214 val_214 2010-03-23 11 +216 val_216 2010-03-23 11 +216 val_216 2010-03-23 11 +216 val_216 2010-03-23 11 +216 val_216 2010-03-23 11 +217 val_217 2010-03-23 11 +217 val_217 2010-03-23 11 +217 val_217 2010-03-23 11 +217 val_217 2010-03-23 11 +218 val_218 2010-03-23 11 +218 val_218 2010-03-23 11 +219 val_219 2010-03-23 11 +219 val_219 2010-03-23 11 +219 val_219 2010-03-23 11 +219 val_219 2010-03-23 11 +221 val_221 2010-03-23 11 +221 val_221 2010-03-23 11 +221 
val_221 2010-03-23 11 +221 val_221 2010-03-23 11 +222 val_222 2010-03-23 11 +222 val_222 2010-03-23 11 +223 val_223 2010-03-23 11 +223 val_223 2010-03-23 11 +223 val_223 2010-03-23 11 +223 val_223 2010-03-23 11 +224 val_224 2010-03-23 11 +224 val_224 2010-03-23 11 +224 val_224 2010-03-23 11 +224 val_224 2010-03-23 11 +226 val_226 2010-03-23 11 +226 val_226 2010-03-23 11 +228 val_228 2010-03-23 11 +228 val_228 2010-03-23 11 +229 val_229 2010-03-23 11 +229 val_229 2010-03-23 11 +229 val_229 2010-03-23 11 +229 val_229 2010-03-23 11 +230 val_230 2010-03-23 11 +230 val_230 2010-03-23 11 +230 val_230 2010-03-23 11 +230 val_230 2010-03-23 11 +230 val_230 2010-03-23 11 +230 val_230 2010-03-23 11 +230 val_230 2010-03-23 11 +230 val_230 2010-03-23 11 +230 val_230 2010-03-23 11 +230 val_230 2010-03-23 11 +233 val_233 2010-03-23 11 +233 val_233 2010-03-23 11 +233 val_233 2010-03-23 11 +233 val_233 2010-03-23 11 +235 val_235 2010-03-23 11 +235 val_235 2010-03-23 11 +237 val_237 2010-03-23 11 +237 val_237 2010-03-23 11 +237 val_237 2010-03-23 11 +237 val_237 2010-03-23 11 +238 val_238 2010-03-23 11 +238 val_238 2010-03-23 11 +238 val_238 2010-03-23 11 +238 val_238 2010-03-23 11 +239 val_239 2010-03-23 11 +239 val_239 2010-03-23 11 +239 val_239 2010-03-23 11 +239 val_239 2010-03-23 11 +24 val_24 2010-03-23 11 +24 val_24 2010-03-23 11 +24 val_24 2010-03-23 11 +24 val_24 2010-03-23 11 +241 val_241 2010-03-23 11 +241 val_241 2010-03-23 11 +242 val_242 2010-03-23 11 +242 val_242 2010-03-23 11 +242 val_242 2010-03-23 11 +242 val_242 2010-03-23 11 +244 val_244 2010-03-23 11 +244 val_244 2010-03-23 11 +247 val_247 2010-03-23 11 +247 val_247 2010-03-23 11 +248 val_248 2010-03-23 11 +248 val_248 2010-03-23 11 +249 val_249 2010-03-23 11 +249 val_249 2010-03-23 11 +252 val_252 2010-03-23 11 +252 val_252 2010-03-23 11 +255 val_255 2010-03-23 11 +255 val_255 2010-03-23 11 +255 val_255 2010-03-23 11 +255 val_255 2010-03-23 11 +256 val_256 2010-03-23 11 +256 val_256 2010-03-23 11 +256 val_256 2010-03-23 11 +256 val_256 2010-03-23 11 +257 val_257 2010-03-23 11 +257 val_257 2010-03-23 11 +258 val_258 2010-03-23 11 +258 val_258 2010-03-23 11 +26 val_26 2010-03-23 11 +26 val_26 2010-03-23 11 +26 val_26 2010-03-23 11 +26 val_26 2010-03-23 11 +260 val_260 2010-03-23 11 +260 val_260 2010-03-23 11 +262 val_262 2010-03-23 11 +262 val_262 2010-03-23 11 +263 val_263 2010-03-23 11 +263 val_263 2010-03-23 11 +265 val_265 2010-03-23 11 +265 val_265 2010-03-23 11 +265 val_265 2010-03-23 11 +265 val_265 2010-03-23 11 +266 val_266 2010-03-23 11 +266 val_266 2010-03-23 11 +27 val_27 2010-03-23 11 +27 val_27 2010-03-23 11 +272 val_272 2010-03-23 11 +272 val_272 2010-03-23 11 +272 val_272 2010-03-23 11 +272 val_272 2010-03-23 11 +273 val_273 2010-03-23 11 +273 val_273 2010-03-23 11 +273 val_273 2010-03-23 11 +273 val_273 2010-03-23 11 +273 val_273 2010-03-23 11 +273 val_273 2010-03-23 11 +274 val_274 2010-03-23 11 +274 val_274 2010-03-23 11 +275 val_275 2010-03-23 11 +275 val_275 2010-03-23 11 +277 val_277 2010-03-23 11 +277 val_277 2010-03-23 11 +277 val_277 2010-03-23 11 +277 val_277 2010-03-23 11 +277 val_277 2010-03-23 11 +277 val_277 2010-03-23 11 +277 val_277 2010-03-23 11 +277 val_277 2010-03-23 11 +278 val_278 2010-03-23 11 +278 val_278 2010-03-23 11 +278 val_278 2010-03-23 11 +278 val_278 2010-03-23 11 +28 val_28 2010-03-23 11 +28 val_28 2010-03-23 11 +280 val_280 2010-03-23 11 +280 val_280 2010-03-23 11 +280 val_280 2010-03-23 11 +280 val_280 2010-03-23 11 +281 val_281 2010-03-23 11 +281 val_281 2010-03-23 11 +281 val_281 
2010-03-23 11 +281 val_281 2010-03-23 11 +282 val_282 2010-03-23 11 +282 val_282 2010-03-23 11 +282 val_282 2010-03-23 11 +282 val_282 2010-03-23 11 +283 val_283 2010-03-23 11 +283 val_283 2010-03-23 11 +284 val_284 2010-03-23 11 +284 val_284 2010-03-23 11 +285 val_285 2010-03-23 11 +285 val_285 2010-03-23 11 +286 val_286 2010-03-23 11 +286 val_286 2010-03-23 11 +287 val_287 2010-03-23 11 +287 val_287 2010-03-23 11 +288 val_288 2010-03-23 11 +288 val_288 2010-03-23 11 +288 val_288 2010-03-23 11 +288 val_288 2010-03-23 11 +289 val_289 2010-03-23 11 +289 val_289 2010-03-23 11 +291 val_291 2010-03-23 11 +291 val_291 2010-03-23 11 +292 val_292 2010-03-23 11 +292 val_292 2010-03-23 11 +296 val_296 2010-03-23 11 +296 val_296 2010-03-23 11 +298 val_298 2010-03-23 11 +298 val_298 2010-03-23 11 +298 val_298 2010-03-23 11 +298 val_298 2010-03-23 11 +298 val_298 2010-03-23 11 +298 val_298 2010-03-23 11 +30 val_30 2010-03-23 11 +30 val_30 2010-03-23 11 +302 val_302 2010-03-23 11 +302 val_302 2010-03-23 11 +305 val_305 2010-03-23 11 +305 val_305 2010-03-23 11 +306 val_306 2010-03-23 11 +306 val_306 2010-03-23 11 +307 val_307 2010-03-23 11 +307 val_307 2010-03-23 11 +307 val_307 2010-03-23 11 +307 val_307 2010-03-23 11 +308 val_308 2010-03-23 11 +308 val_308 2010-03-23 11 +309 val_309 2010-03-23 11 +309 val_309 2010-03-23 11 +309 val_309 2010-03-23 11 +309 val_309 2010-03-23 11 +310 val_310 2010-03-23 11 +310 val_310 2010-03-23 11 +311 val_311 2010-03-23 11 +311 val_311 2010-03-23 11 +311 val_311 2010-03-23 11 +311 val_311 2010-03-23 11 +311 val_311 2010-03-23 11 +311 val_311 2010-03-23 11 +315 val_315 2010-03-23 11 +315 val_315 2010-03-23 11 +316 val_316 2010-03-23 11 +316 val_316 2010-03-23 11 +316 val_316 2010-03-23 11 +316 val_316 2010-03-23 11 +316 val_316 2010-03-23 11 +316 val_316 2010-03-23 11 +317 val_317 2010-03-23 11 +317 val_317 2010-03-23 11 +317 val_317 2010-03-23 11 +317 val_317 2010-03-23 11 +318 val_318 2010-03-23 11 +318 val_318 2010-03-23 11 +318 val_318 2010-03-23 11 +318 val_318 2010-03-23 11 +318 val_318 2010-03-23 11 +318 val_318 2010-03-23 11 +321 val_321 2010-03-23 11 +321 val_321 2010-03-23 11 +321 val_321 2010-03-23 11 +321 val_321 2010-03-23 11 +322 val_322 2010-03-23 11 +322 val_322 2010-03-23 11 +322 val_322 2010-03-23 11 +322 val_322 2010-03-23 11 +323 val_323 2010-03-23 11 +323 val_323 2010-03-23 11 +325 val_325 2010-03-23 11 +325 val_325 2010-03-23 11 +325 val_325 2010-03-23 11 +325 val_325 2010-03-23 11 +327 val_327 2010-03-23 11 +327 val_327 2010-03-23 11 +327 val_327 2010-03-23 11 +327 val_327 2010-03-23 11 +327 val_327 2010-03-23 11 +327 val_327 2010-03-23 11 +33 val_33 2010-03-23 11 +33 val_33 2010-03-23 11 +331 val_331 2010-03-23 11 +331 val_331 2010-03-23 11 +331 val_331 2010-03-23 11 +331 val_331 2010-03-23 11 +332 val_332 2010-03-23 11 +332 val_332 2010-03-23 11 +333 val_333 2010-03-23 11 +333 val_333 2010-03-23 11 +333 val_333 2010-03-23 11 +333 val_333 2010-03-23 11 +335 val_335 2010-03-23 11 +335 val_335 2010-03-23 11 +336 val_336 2010-03-23 11 +336 val_336 2010-03-23 11 +338 val_338 2010-03-23 11 +338 val_338 2010-03-23 11 +339 val_339 2010-03-23 11 +339 val_339 2010-03-23 11 +34 val_34 2010-03-23 11 +34 val_34 2010-03-23 11 +341 val_341 2010-03-23 11 +341 val_341 2010-03-23 11 +342 val_342 2010-03-23 11 +342 val_342 2010-03-23 11 +342 val_342 2010-03-23 11 +342 val_342 2010-03-23 11 +344 val_344 2010-03-23 11 +344 val_344 2010-03-23 11 +344 val_344 2010-03-23 11 +344 val_344 2010-03-23 11 +345 val_345 2010-03-23 11 +345 val_345 2010-03-23 11 +348 val_348 
2010-03-23 11 +348 val_348 2010-03-23 11 +348 val_348 2010-03-23 11 +348 val_348 2010-03-23 11 +348 val_348 2010-03-23 11 +348 val_348 2010-03-23 11 +348 val_348 2010-03-23 11 +348 val_348 2010-03-23 11 +348 val_348 2010-03-23 11 +348 val_348 2010-03-23 11 +35 val_35 2010-03-23 11 +35 val_35 2010-03-23 11 +35 val_35 2010-03-23 11 +35 val_35 2010-03-23 11 +35 val_35 2010-03-23 11 +35 val_35 2010-03-23 11 +351 val_351 2010-03-23 11 +351 val_351 2010-03-23 11 +353 val_353 2010-03-23 11 +353 val_353 2010-03-23 11 +353 val_353 2010-03-23 11 +353 val_353 2010-03-23 11 +356 val_356 2010-03-23 11 +356 val_356 2010-03-23 11 +360 val_360 2010-03-23 11 +360 val_360 2010-03-23 11 +362 val_362 2010-03-23 11 +362 val_362 2010-03-23 11 +364 val_364 2010-03-23 11 +364 val_364 2010-03-23 11 +365 val_365 2010-03-23 11 +365 val_365 2010-03-23 11 +366 val_366 2010-03-23 11 +366 val_366 2010-03-23 11 +367 val_367 2010-03-23 11 +367 val_367 2010-03-23 11 +367 val_367 2010-03-23 11 +367 val_367 2010-03-23 11 +368 val_368 2010-03-23 11 +368 val_368 2010-03-23 11 +369 val_369 2010-03-23 11 +369 val_369 2010-03-23 11 +369 val_369 2010-03-23 11 +369 val_369 2010-03-23 11 +369 val_369 2010-03-23 11 +369 val_369 2010-03-23 11 +37 val_37 2010-03-23 11 +37 val_37 2010-03-23 11 +37 val_37 2010-03-23 11 +37 val_37 2010-03-23 11 +373 val_373 2010-03-23 11 +373 val_373 2010-03-23 11 +374 val_374 2010-03-23 11 +374 val_374 2010-03-23 11 +375 val_375 2010-03-23 11 +375 val_375 2010-03-23 11 +377 val_377 2010-03-23 11 +377 val_377 2010-03-23 11 +378 val_378 2010-03-23 11 +378 val_378 2010-03-23 11 +379 val_379 2010-03-23 11 +379 val_379 2010-03-23 11 +382 val_382 2010-03-23 11 +382 val_382 2010-03-23 11 +382 val_382 2010-03-23 11 +382 val_382 2010-03-23 11 +384 val_384 2010-03-23 11 +384 val_384 2010-03-23 11 +384 val_384 2010-03-23 11 +384 val_384 2010-03-23 11 +384 val_384 2010-03-23 11 +384 val_384 2010-03-23 11 +386 val_386 2010-03-23 11 +386 val_386 2010-03-23 11 +389 val_389 2010-03-23 11 +389 val_389 2010-03-23 11 +392 val_392 2010-03-23 11 +392 val_392 2010-03-23 11 +393 val_393 2010-03-23 11 +393 val_393 2010-03-23 11 +394 val_394 2010-03-23 11 +394 val_394 2010-03-23 11 +395 val_395 2010-03-23 11 +395 val_395 2010-03-23 11 +395 val_395 2010-03-23 11 +395 val_395 2010-03-23 11 +396 val_396 2010-03-23 11 +396 val_396 2010-03-23 11 +396 val_396 2010-03-23 11 +396 val_396 2010-03-23 11 +396 val_396 2010-03-23 11 +396 val_396 2010-03-23 11 +397 val_397 2010-03-23 11 +397 val_397 2010-03-23 11 +397 val_397 2010-03-23 11 +397 val_397 2010-03-23 11 +399 val_399 2010-03-23 11 +399 val_399 2010-03-23 11 +399 val_399 2010-03-23 11 +399 val_399 2010-03-23 11 +4 val_4 2010-03-23 11 +4 val_4 2010-03-23 11 +400 val_400 2010-03-23 11 +400 val_400 2010-03-23 11 +401 val_401 2010-03-23 11 +401 val_401 2010-03-23 11 +401 val_401 2010-03-23 11 +401 val_401 2010-03-23 11 +401 val_401 2010-03-23 11 +401 val_401 2010-03-23 11 +401 val_401 2010-03-23 11 +401 val_401 2010-03-23 11 +401 val_401 2010-03-23 11 +401 val_401 2010-03-23 11 +402 val_402 2010-03-23 11 +402 val_402 2010-03-23 11 +403 val_403 2010-03-23 11 +403 val_403 2010-03-23 11 +403 val_403 2010-03-23 11 +403 val_403 2010-03-23 11 +403 val_403 2010-03-23 11 +403 val_403 2010-03-23 11 +404 val_404 2010-03-23 11 +404 val_404 2010-03-23 11 +404 val_404 2010-03-23 11 +404 val_404 2010-03-23 11 +406 val_406 2010-03-23 11 +406 val_406 2010-03-23 11 +406 val_406 2010-03-23 11 +406 val_406 2010-03-23 11 +406 val_406 2010-03-23 11 +406 val_406 2010-03-23 11 +406 val_406 2010-03-23 11 +406 
val_406 2010-03-23 11 +407 val_407 2010-03-23 11 +407 val_407 2010-03-23 11 +409 val_409 2010-03-23 11 +409 val_409 2010-03-23 11 +409 val_409 2010-03-23 11 +409 val_409 2010-03-23 11 +409 val_409 2010-03-23 11 +409 val_409 2010-03-23 11 +41 val_41 2010-03-23 11 +41 val_41 2010-03-23 11 +411 val_411 2010-03-23 11 +411 val_411 2010-03-23 11 +413 val_413 2010-03-23 11 +413 val_413 2010-03-23 11 +413 val_413 2010-03-23 11 +413 val_413 2010-03-23 11 +414 val_414 2010-03-23 11 +414 val_414 2010-03-23 11 +414 val_414 2010-03-23 11 +414 val_414 2010-03-23 11 +417 val_417 2010-03-23 11 +417 val_417 2010-03-23 11 +417 val_417 2010-03-23 11 +417 val_417 2010-03-23 11 +417 val_417 2010-03-23 11 +417 val_417 2010-03-23 11 +418 val_418 2010-03-23 11 +418 val_418 2010-03-23 11 +419 val_419 2010-03-23 11 +419 val_419 2010-03-23 11 +42 val_42 2010-03-23 11 +42 val_42 2010-03-23 11 +42 val_42 2010-03-23 11 +42 val_42 2010-03-23 11 +421 val_421 2010-03-23 11 +421 val_421 2010-03-23 11 +424 val_424 2010-03-23 11 +424 val_424 2010-03-23 11 +424 val_424 2010-03-23 11 +424 val_424 2010-03-23 11 +427 val_427 2010-03-23 11 +427 val_427 2010-03-23 11 +429 val_429 2010-03-23 11 +429 val_429 2010-03-23 11 +429 val_429 2010-03-23 11 +429 val_429 2010-03-23 11 +43 val_43 2010-03-23 11 +43 val_43 2010-03-23 11 +430 val_430 2010-03-23 11 +430 val_430 2010-03-23 11 +430 val_430 2010-03-23 11 +430 val_430 2010-03-23 11 +430 val_430 2010-03-23 11 +430 val_430 2010-03-23 11 +431 val_431 2010-03-23 11 +431 val_431 2010-03-23 11 +431 val_431 2010-03-23 11 +431 val_431 2010-03-23 11 +431 val_431 2010-03-23 11 +431 val_431 2010-03-23 11 +432 val_432 2010-03-23 11 +432 val_432 2010-03-23 11 +435 val_435 2010-03-23 11 +435 val_435 2010-03-23 11 +436 val_436 2010-03-23 11 +436 val_436 2010-03-23 11 +437 val_437 2010-03-23 11 +437 val_437 2010-03-23 11 +438 val_438 2010-03-23 11 +438 val_438 2010-03-23 11 +438 val_438 2010-03-23 11 +438 val_438 2010-03-23 11 +438 val_438 2010-03-23 11 +438 val_438 2010-03-23 11 +439 val_439 2010-03-23 11 +439 val_439 2010-03-23 11 +439 val_439 2010-03-23 11 +439 val_439 2010-03-23 11 +44 val_44 2010-03-23 11 +44 val_44 2010-03-23 11 +443 val_443 2010-03-23 11 +443 val_443 2010-03-23 11 +444 val_444 2010-03-23 11 +444 val_444 2010-03-23 11 +446 val_446 2010-03-23 11 +446 val_446 2010-03-23 11 +448 val_448 2010-03-23 11 +448 val_448 2010-03-23 11 +449 val_449 2010-03-23 11 +449 val_449 2010-03-23 11 +452 val_452 2010-03-23 11 +452 val_452 2010-03-23 11 +453 val_453 2010-03-23 11 +453 val_453 2010-03-23 11 +454 val_454 2010-03-23 11 +454 val_454 2010-03-23 11 +454 val_454 2010-03-23 11 +454 val_454 2010-03-23 11 +454 val_454 2010-03-23 11 +454 val_454 2010-03-23 11 +455 val_455 2010-03-23 11 +455 val_455 2010-03-23 11 +457 val_457 2010-03-23 11 +457 val_457 2010-03-23 11 +458 val_458 2010-03-23 11 +458 val_458 2010-03-23 11 +458 val_458 2010-03-23 11 +458 val_458 2010-03-23 11 +459 val_459 2010-03-23 11 +459 val_459 2010-03-23 11 +459 val_459 2010-03-23 11 +459 val_459 2010-03-23 11 +460 val_460 2010-03-23 11 +460 val_460 2010-03-23 11 +462 val_462 2010-03-23 11 +462 val_462 2010-03-23 11 +462 val_462 2010-03-23 11 +462 val_462 2010-03-23 11 +463 val_463 2010-03-23 11 +463 val_463 2010-03-23 11 +463 val_463 2010-03-23 11 +463 val_463 2010-03-23 11 +466 val_466 2010-03-23 11 +466 val_466 2010-03-23 11 +466 val_466 2010-03-23 11 +466 val_466 2010-03-23 11 +466 val_466 2010-03-23 11 +466 val_466 2010-03-23 11 +467 val_467 2010-03-23 11 +467 val_467 2010-03-23 11 +468 val_468 2010-03-23 11 +468 val_468 
2010-03-23 11 +468 val_468 2010-03-23 11 +468 val_468 2010-03-23 11 +468 val_468 2010-03-23 11 +468 val_468 2010-03-23 11 +468 val_468 2010-03-23 11 +468 val_468 2010-03-23 11 +469 val_469 2010-03-23 11 +469 val_469 2010-03-23 11 +469 val_469 2010-03-23 11 +469 val_469 2010-03-23 11 +469 val_469 2010-03-23 11 +469 val_469 2010-03-23 11 +469 val_469 2010-03-23 11 +469 val_469 2010-03-23 11 +469 val_469 2010-03-23 11 +469 val_469 2010-03-23 11 +47 val_47 2010-03-23 11 +47 val_47 2010-03-23 11 +470 val_470 2010-03-23 11 +470 val_470 2010-03-23 11 +472 val_472 2010-03-23 11 +472 val_472 2010-03-23 11 +475 val_475 2010-03-23 11 +475 val_475 2010-03-23 11 +477 val_477 2010-03-23 11 +477 val_477 2010-03-23 11 +478 val_478 2010-03-23 11 +478 val_478 2010-03-23 11 +478 val_478 2010-03-23 11 +478 val_478 2010-03-23 11 +479 val_479 2010-03-23 11 +479 val_479 2010-03-23 11 +480 val_480 2010-03-23 11 +480 val_480 2010-03-23 11 +480 val_480 2010-03-23 11 +480 val_480 2010-03-23 11 +480 val_480 2010-03-23 11 +480 val_480 2010-03-23 11 +481 val_481 2010-03-23 11 +481 val_481 2010-03-23 11 +482 val_482 2010-03-23 11 +482 val_482 2010-03-23 11 +483 val_483 2010-03-23 11 +483 val_483 2010-03-23 11 +484 val_484 2010-03-23 11 +484 val_484 2010-03-23 11 +485 val_485 2010-03-23 11 +485 val_485 2010-03-23 11 +487 val_487 2010-03-23 11 +487 val_487 2010-03-23 11 +489 val_489 2010-03-23 11 +489 val_489 2010-03-23 11 +489 val_489 2010-03-23 11 +489 val_489 2010-03-23 11 +489 val_489 2010-03-23 11 +489 val_489 2010-03-23 11 +489 val_489 2010-03-23 11 +489 val_489 2010-03-23 11 +490 val_490 2010-03-23 11 +490 val_490 2010-03-23 11 +491 val_491 2010-03-23 11 +491 val_491 2010-03-23 11 +492 val_492 2010-03-23 11 +492 val_492 2010-03-23 11 +492 val_492 2010-03-23 11 +492 val_492 2010-03-23 11 +493 val_493 2010-03-23 11 +493 val_493 2010-03-23 11 +494 val_494 2010-03-23 11 +494 val_494 2010-03-23 11 +495 val_495 2010-03-23 11 +495 val_495 2010-03-23 11 +496 val_496 2010-03-23 11 +496 val_496 2010-03-23 11 +497 val_497 2010-03-23 11 +497 val_497 2010-03-23 11 +498 val_498 2010-03-23 11 +498 val_498 2010-03-23 11 +498 val_498 2010-03-23 11 +498 val_498 2010-03-23 11 +498 val_498 2010-03-23 11 +498 val_498 2010-03-23 11 +5 val_5 2010-03-23 11 +5 val_5 2010-03-23 11 +5 val_5 2010-03-23 11 +5 val_5 2010-03-23 11 +5 val_5 2010-03-23 11 +5 val_5 2010-03-23 11 +51 val_51 2010-03-23 11 +51 val_51 2010-03-23 11 +51 val_51 2010-03-23 11 +51 val_51 2010-03-23 11 +53 val_53 2010-03-23 11 +53 val_53 2010-03-23 11 +54 val_54 2010-03-23 11 +54 val_54 2010-03-23 11 +57 val_57 2010-03-23 11 +57 val_57 2010-03-23 11 +58 val_58 2010-03-23 11 +58 val_58 2010-03-23 11 +58 val_58 2010-03-23 11 +58 val_58 2010-03-23 11 +64 val_64 2010-03-23 11 +64 val_64 2010-03-23 11 +65 val_65 2010-03-23 11 +65 val_65 2010-03-23 11 +66 val_66 2010-03-23 11 +66 val_66 2010-03-23 11 +67 val_67 2010-03-23 11 +67 val_67 2010-03-23 11 +67 val_67 2010-03-23 11 +67 val_67 2010-03-23 11 +69 val_69 2010-03-23 11 +69 val_69 2010-03-23 11 +70 val_70 2010-03-23 11 +70 val_70 2010-03-23 11 +70 val_70 2010-03-23 11 +70 val_70 2010-03-23 11 +70 val_70 2010-03-23 11 +70 val_70 2010-03-23 11 +72 val_72 2010-03-23 11 +72 val_72 2010-03-23 11 +72 val_72 2010-03-23 11 +72 val_72 2010-03-23 11 +74 val_74 2010-03-23 11 +74 val_74 2010-03-23 11 +76 val_76 2010-03-23 11 +76 val_76 2010-03-23 11 +76 val_76 2010-03-23 11 +76 val_76 2010-03-23 11 +77 val_77 2010-03-23 11 +77 val_77 2010-03-23 11 +78 val_78 2010-03-23 11 +78 val_78 2010-03-23 11 +8 val_8 2010-03-23 11 +8 val_8 2010-03-23 
11 +80 val_80 2010-03-23 11 +80 val_80 2010-03-23 11 +82 val_82 2010-03-23 11 +82 val_82 2010-03-23 11 +83 val_83 2010-03-23 11 +83 val_83 2010-03-23 11 +83 val_83 2010-03-23 11 +83 val_83 2010-03-23 11 +84 val_84 2010-03-23 11 +84 val_84 2010-03-23 11 +84 val_84 2010-03-23 11 +84 val_84 2010-03-23 11 +85 val_85 2010-03-23 11 +85 val_85 2010-03-23 11 +86 val_86 2010-03-23 11 +86 val_86 2010-03-23 11 +87 val_87 2010-03-23 11 +87 val_87 2010-03-23 11 +9 val_9 2010-03-23 11 +9 val_9 2010-03-23 11 +90 val_90 2010-03-23 11 +90 val_90 2010-03-23 11 +90 val_90 2010-03-23 11 +90 val_90 2010-03-23 11 +90 val_90 2010-03-23 11 +90 val_90 2010-03-23 11 +92 val_92 2010-03-23 11 +92 val_92 2010-03-23 11 +95 val_95 2010-03-23 11 +95 val_95 2010-03-23 11 +95 val_95 2010-03-23 11 +95 val_95 2010-03-23 11 +96 val_96 2010-03-23 11 +96 val_96 2010-03-23 11 +97 val_97 2010-03-23 11 +97 val_97 2010-03-23 11 +97 val_97 2010-03-23 11 +97 val_97 2010-03-23 11 +98 val_98 2010-03-23 11 +98 val_98 2010-03-23 11 +98 val_98 2010-03-23 11 +98 val_98 2010-03-23 11 +PREHOOK: query: select * from nzhang_part_bucket where ds='2010-03-23' and hr='12' order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@nzhang_part_bucket +PREHOOK: Input: default@nzhang_part_bucket@ds=2010-03-23/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select * from nzhang_part_bucket where ds='2010-03-23' and hr='12' order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@nzhang_part_bucket +POSTHOOK: Input: default@nzhang_part_bucket@ds=2010-03-23/hr=12 +#### A masked pattern was here #### +POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +0 val_0 2010-03-23 12 +0 val_0 2010-03-23 12 +0 val_0 2010-03-23 12 +0 val_0 2010-03-23 12 +0 val_0 2010-03-23 12 +0 val_0 2010-03-23 12 +10 val_10 2010-03-23 12 +10 val_10 2010-03-23 12 +100 val_100 2010-03-23 12 +100 val_100 2010-03-23 12 +100 val_100 2010-03-23 12 +100 val_100 2010-03-23 12 +103 val_103 2010-03-23 12 +103 val_103 2010-03-23 12 +103 val_103 2010-03-23 12 +103 val_103 2010-03-23 12 +104 val_104 2010-03-23 12 +104 val_104 2010-03-23 12 +104 val_104 2010-03-23 12 +104 val_104 2010-03-23 12 +105 val_105 2010-03-23 12 +105 val_105 2010-03-23 12 +11 val_11 2010-03-23 12 +11 val_11 2010-03-23 12 +111 val_111 2010-03-23 12 +111 val_111 2010-03-23 12 +113 val_113 2010-03-23 12 +113 val_113 2010-03-23 12 +113 val_113 2010-03-23 12 +113 val_113 2010-03-23 12 +114 val_114 2010-03-23 12 +114 val_114 2010-03-23 12 +116 val_116 2010-03-23 12 +116 val_116 2010-03-23 12 +118 val_118 2010-03-23 12 +118 val_118 2010-03-23 12 +118 val_118 2010-03-23 12 +118 val_118 2010-03-23 12 +119 val_119 2010-03-23 12 +119 val_119 2010-03-23 12 +119 val_119 2010-03-23 12 +119 val_119 2010-03-23 12 +119 val_119 2010-03-23 12 +119 val_119 2010-03-23 12 +12 val_12 2010-03-23 12 +12 val_12 2010-03-23 12 +12 val_12 2010-03-23 12 +12 val_12 2010-03-23 12 +120 val_120 2010-03-23 12 +120 val_120 2010-03-23 12 +120 val_120 2010-03-23 12 +120 val_120 
2010-03-23 12 +125 val_125 2010-03-23 12 +125 val_125 2010-03-23 12 +125 val_125 2010-03-23 12 +125 val_125 2010-03-23 12 +126 val_126 2010-03-23 12 +126 val_126 2010-03-23 12 +128 val_128 2010-03-23 12 +128 val_128 2010-03-23 12 +128 val_128 2010-03-23 12 +128 val_128 2010-03-23 12 +128 val_128 2010-03-23 12 +128 val_128 2010-03-23 12 +129 val_129 2010-03-23 12 +129 val_129 2010-03-23 12 +129 val_129 2010-03-23 12 +129 val_129 2010-03-23 12 +131 val_131 2010-03-23 12 +131 val_131 2010-03-23 12 +133 val_133 2010-03-23 12 +133 val_133 2010-03-23 12 +134 val_134 2010-03-23 12 +134 val_134 2010-03-23 12 +134 val_134 2010-03-23 12 +134 val_134 2010-03-23 12 +136 val_136 2010-03-23 12 +136 val_136 2010-03-23 12 +137 val_137 2010-03-23 12 +137 val_137 2010-03-23 12 +137 val_137 2010-03-23 12 +137 val_137 2010-03-23 12 +138 val_138 2010-03-23 12 +138 val_138 2010-03-23 12 +138 val_138 2010-03-23 12 +138 val_138 2010-03-23 12 +138 val_138 2010-03-23 12 +138 val_138 2010-03-23 12 +138 val_138 2010-03-23 12 +138 val_138 2010-03-23 12 +143 val_143 2010-03-23 12 +143 val_143 2010-03-23 12 +145 val_145 2010-03-23 12 +145 val_145 2010-03-23 12 +146 val_146 2010-03-23 12 +146 val_146 2010-03-23 12 +146 val_146 2010-03-23 12 +146 val_146 2010-03-23 12 +149 val_149 2010-03-23 12 +149 val_149 2010-03-23 12 +149 val_149 2010-03-23 12 +149 val_149 2010-03-23 12 +15 val_15 2010-03-23 12 +15 val_15 2010-03-23 12 +15 val_15 2010-03-23 12 +15 val_15 2010-03-23 12 +150 val_150 2010-03-23 12 +150 val_150 2010-03-23 12 +152 val_152 2010-03-23 12 +152 val_152 2010-03-23 12 +152 val_152 2010-03-23 12 +152 val_152 2010-03-23 12 +153 val_153 2010-03-23 12 +153 val_153 2010-03-23 12 +155 val_155 2010-03-23 12 +155 val_155 2010-03-23 12 +156 val_156 2010-03-23 12 +156 val_156 2010-03-23 12 +157 val_157 2010-03-23 12 +157 val_157 2010-03-23 12 +158 val_158 2010-03-23 12 +158 val_158 2010-03-23 12 +160 val_160 2010-03-23 12 +160 val_160 2010-03-23 12 +162 val_162 2010-03-23 12 +162 val_162 2010-03-23 12 +163 val_163 2010-03-23 12 +163 val_163 2010-03-23 12 +164 val_164 2010-03-23 12 +164 val_164 2010-03-23 12 +164 val_164 2010-03-23 12 +164 val_164 2010-03-23 12 +165 val_165 2010-03-23 12 +165 val_165 2010-03-23 12 +165 val_165 2010-03-23 12 +165 val_165 2010-03-23 12 +166 val_166 2010-03-23 12 +166 val_166 2010-03-23 12 +167 val_167 2010-03-23 12 +167 val_167 2010-03-23 12 +167 val_167 2010-03-23 12 +167 val_167 2010-03-23 12 +167 val_167 2010-03-23 12 +167 val_167 2010-03-23 12 +168 val_168 2010-03-23 12 +168 val_168 2010-03-23 12 +169 val_169 2010-03-23 12 +169 val_169 2010-03-23 12 +169 val_169 2010-03-23 12 +169 val_169 2010-03-23 12 +169 val_169 2010-03-23 12 +169 val_169 2010-03-23 12 +169 val_169 2010-03-23 12 +169 val_169 2010-03-23 12 +17 val_17 2010-03-23 12 +17 val_17 2010-03-23 12 +170 val_170 2010-03-23 12 +170 val_170 2010-03-23 12 +172 val_172 2010-03-23 12 +172 val_172 2010-03-23 12 +172 val_172 2010-03-23 12 +172 val_172 2010-03-23 12 +174 val_174 2010-03-23 12 +174 val_174 2010-03-23 12 +174 val_174 2010-03-23 12 +174 val_174 2010-03-23 12 +175 val_175 2010-03-23 12 +175 val_175 2010-03-23 12 +175 val_175 2010-03-23 12 +175 val_175 2010-03-23 12 +176 val_176 2010-03-23 12 +176 val_176 2010-03-23 12 +176 val_176 2010-03-23 12 +176 val_176 2010-03-23 12 +177 val_177 2010-03-23 12 +177 val_177 2010-03-23 12 +178 val_178 2010-03-23 12 +178 val_178 2010-03-23 12 +179 val_179 2010-03-23 12 +179 val_179 2010-03-23 12 +179 val_179 2010-03-23 12 +179 val_179 2010-03-23 12 +18 val_18 2010-03-23 12 +18 val_18 
2010-03-23 12 +18 val_18 2010-03-23 12 +18 val_18 2010-03-23 12 +180 val_180 2010-03-23 12 +180 val_180 2010-03-23 12 +181 val_181 2010-03-23 12 +181 val_181 2010-03-23 12 +183 val_183 2010-03-23 12 +183 val_183 2010-03-23 12 +186 val_186 2010-03-23 12 +186 val_186 2010-03-23 12 +187 val_187 2010-03-23 12 +187 val_187 2010-03-23 12 +187 val_187 2010-03-23 12 +187 val_187 2010-03-23 12 +187 val_187 2010-03-23 12 +187 val_187 2010-03-23 12 +189 val_189 2010-03-23 12 +189 val_189 2010-03-23 12 +19 val_19 2010-03-23 12 +19 val_19 2010-03-23 12 +190 val_190 2010-03-23 12 +190 val_190 2010-03-23 12 +191 val_191 2010-03-23 12 +191 val_191 2010-03-23 12 +191 val_191 2010-03-23 12 +191 val_191 2010-03-23 12 +192 val_192 2010-03-23 12 +192 val_192 2010-03-23 12 +193 val_193 2010-03-23 12 +193 val_193 2010-03-23 12 +193 val_193 2010-03-23 12 +193 val_193 2010-03-23 12 +193 val_193 2010-03-23 12 +193 val_193 2010-03-23 12 +194 val_194 2010-03-23 12 +194 val_194 2010-03-23 12 +195 val_195 2010-03-23 12 +195 val_195 2010-03-23 12 +195 val_195 2010-03-23 12 +195 val_195 2010-03-23 12 +196 val_196 2010-03-23 12 +196 val_196 2010-03-23 12 +197 val_197 2010-03-23 12 +197 val_197 2010-03-23 12 +197 val_197 2010-03-23 12 +197 val_197 2010-03-23 12 +199 val_199 2010-03-23 12 +199 val_199 2010-03-23 12 +199 val_199 2010-03-23 12 +199 val_199 2010-03-23 12 +199 val_199 2010-03-23 12 +199 val_199 2010-03-23 12 +2 val_2 2010-03-23 12 +2 val_2 2010-03-23 12 +20 val_20 2010-03-23 12 +20 val_20 2010-03-23 12 +200 val_200 2010-03-23 12 +200 val_200 2010-03-23 12 +200 val_200 2010-03-23 12 +200 val_200 2010-03-23 12 +201 val_201 2010-03-23 12 +201 val_201 2010-03-23 12 +202 val_202 2010-03-23 12 +202 val_202 2010-03-23 12 +203 val_203 2010-03-23 12 +203 val_203 2010-03-23 12 +203 val_203 2010-03-23 12 +203 val_203 2010-03-23 12 +205 val_205 2010-03-23 12 +205 val_205 2010-03-23 12 +205 val_205 2010-03-23 12 +205 val_205 2010-03-23 12 +207 val_207 2010-03-23 12 +207 val_207 2010-03-23 12 +207 val_207 2010-03-23 12 +207 val_207 2010-03-23 12 +208 val_208 2010-03-23 12 +208 val_208 2010-03-23 12 +208 val_208 2010-03-23 12 +208 val_208 2010-03-23 12 +208 val_208 2010-03-23 12 +208 val_208 2010-03-23 12 +209 val_209 2010-03-23 12 +209 val_209 2010-03-23 12 +209 val_209 2010-03-23 12 +209 val_209 2010-03-23 12 +213 val_213 2010-03-23 12 +213 val_213 2010-03-23 12 +213 val_213 2010-03-23 12 +213 val_213 2010-03-23 12 +214 val_214 2010-03-23 12 +214 val_214 2010-03-23 12 +216 val_216 2010-03-23 12 +216 val_216 2010-03-23 12 +216 val_216 2010-03-23 12 +216 val_216 2010-03-23 12 +217 val_217 2010-03-23 12 +217 val_217 2010-03-23 12 +217 val_217 2010-03-23 12 +217 val_217 2010-03-23 12 +218 val_218 2010-03-23 12 +218 val_218 2010-03-23 12 +219 val_219 2010-03-23 12 +219 val_219 2010-03-23 12 +219 val_219 2010-03-23 12 +219 val_219 2010-03-23 12 +221 val_221 2010-03-23 12 +221 val_221 2010-03-23 12 +221 val_221 2010-03-23 12 +221 val_221 2010-03-23 12 +222 val_222 2010-03-23 12 +222 val_222 2010-03-23 12 +223 val_223 2010-03-23 12 +223 val_223 2010-03-23 12 +223 val_223 2010-03-23 12 +223 val_223 2010-03-23 12 +224 val_224 2010-03-23 12 +224 val_224 2010-03-23 12 +224 val_224 2010-03-23 12 +224 val_224 2010-03-23 12 +226 val_226 2010-03-23 12 +226 val_226 2010-03-23 12 +228 val_228 2010-03-23 12 +228 val_228 2010-03-23 12 +229 val_229 2010-03-23 12 +229 val_229 2010-03-23 12 +229 val_229 2010-03-23 12 +229 val_229 2010-03-23 12 +230 val_230 2010-03-23 12 +230 val_230 2010-03-23 12 +230 val_230 2010-03-23 12 +230 val_230 2010-03-23 
12 +230 val_230 2010-03-23 12 +230 val_230 2010-03-23 12 +230 val_230 2010-03-23 12 +230 val_230 2010-03-23 12 +230 val_230 2010-03-23 12 +230 val_230 2010-03-23 12 +233 val_233 2010-03-23 12 +233 val_233 2010-03-23 12 +233 val_233 2010-03-23 12 +233 val_233 2010-03-23 12 +235 val_235 2010-03-23 12 +235 val_235 2010-03-23 12 +237 val_237 2010-03-23 12 +237 val_237 2010-03-23 12 +237 val_237 2010-03-23 12 +237 val_237 2010-03-23 12 +238 val_238 2010-03-23 12 +238 val_238 2010-03-23 12 +238 val_238 2010-03-23 12 +238 val_238 2010-03-23 12 +239 val_239 2010-03-23 12 +239 val_239 2010-03-23 12 +239 val_239 2010-03-23 12 +239 val_239 2010-03-23 12 +24 val_24 2010-03-23 12 +24 val_24 2010-03-23 12 +24 val_24 2010-03-23 12 +24 val_24 2010-03-23 12 +241 val_241 2010-03-23 12 +241 val_241 2010-03-23 12 +242 val_242 2010-03-23 12 +242 val_242 2010-03-23 12 +242 val_242 2010-03-23 12 +242 val_242 2010-03-23 12 +244 val_244 2010-03-23 12 +244 val_244 2010-03-23 12 +247 val_247 2010-03-23 12 +247 val_247 2010-03-23 12 +248 val_248 2010-03-23 12 +248 val_248 2010-03-23 12 +249 val_249 2010-03-23 12 +249 val_249 2010-03-23 12 +252 val_252 2010-03-23 12 +252 val_252 2010-03-23 12 +255 val_255 2010-03-23 12 +255 val_255 2010-03-23 12 +255 val_255 2010-03-23 12 +255 val_255 2010-03-23 12 +256 val_256 2010-03-23 12 +256 val_256 2010-03-23 12 +256 val_256 2010-03-23 12 +256 val_256 2010-03-23 12 +257 val_257 2010-03-23 12 +257 val_257 2010-03-23 12 +258 val_258 2010-03-23 12 +258 val_258 2010-03-23 12 +26 val_26 2010-03-23 12 +26 val_26 2010-03-23 12 +26 val_26 2010-03-23 12 +26 val_26 2010-03-23 12 +260 val_260 2010-03-23 12 +260 val_260 2010-03-23 12 +262 val_262 2010-03-23 12 +262 val_262 2010-03-23 12 +263 val_263 2010-03-23 12 +263 val_263 2010-03-23 12 +265 val_265 2010-03-23 12 +265 val_265 2010-03-23 12 +265 val_265 2010-03-23 12 +265 val_265 2010-03-23 12 +266 val_266 2010-03-23 12 +266 val_266 2010-03-23 12 +27 val_27 2010-03-23 12 +27 val_27 2010-03-23 12 +272 val_272 2010-03-23 12 +272 val_272 2010-03-23 12 +272 val_272 2010-03-23 12 +272 val_272 2010-03-23 12 +273 val_273 2010-03-23 12 +273 val_273 2010-03-23 12 +273 val_273 2010-03-23 12 +273 val_273 2010-03-23 12 +273 val_273 2010-03-23 12 +273 val_273 2010-03-23 12 +274 val_274 2010-03-23 12 +274 val_274 2010-03-23 12 +275 val_275 2010-03-23 12 +275 val_275 2010-03-23 12 +277 val_277 2010-03-23 12 +277 val_277 2010-03-23 12 +277 val_277 2010-03-23 12 +277 val_277 2010-03-23 12 +277 val_277 2010-03-23 12 +277 val_277 2010-03-23 12 +277 val_277 2010-03-23 12 +277 val_277 2010-03-23 12 +278 val_278 2010-03-23 12 +278 val_278 2010-03-23 12 +278 val_278 2010-03-23 12 +278 val_278 2010-03-23 12 +28 val_28 2010-03-23 12 +28 val_28 2010-03-23 12 +280 val_280 2010-03-23 12 +280 val_280 2010-03-23 12 +280 val_280 2010-03-23 12 +280 val_280 2010-03-23 12 +281 val_281 2010-03-23 12 +281 val_281 2010-03-23 12 +281 val_281 2010-03-23 12 +281 val_281 2010-03-23 12 +282 val_282 2010-03-23 12 +282 val_282 2010-03-23 12 +282 val_282 2010-03-23 12 +282 val_282 2010-03-23 12 +283 val_283 2010-03-23 12 +283 val_283 2010-03-23 12 +284 val_284 2010-03-23 12 +284 val_284 2010-03-23 12 +285 val_285 2010-03-23 12 +285 val_285 2010-03-23 12 +286 val_286 2010-03-23 12 +286 val_286 2010-03-23 12 +287 val_287 2010-03-23 12 +287 val_287 2010-03-23 12 +288 val_288 2010-03-23 12 +288 val_288 2010-03-23 12 +288 val_288 2010-03-23 12 +288 val_288 2010-03-23 12 +289 val_289 2010-03-23 12 +289 val_289 2010-03-23 12 +291 val_291 2010-03-23 12 +291 val_291 2010-03-23 12 +292 
val_292 2010-03-23 12 +292 val_292 2010-03-23 12 +296 val_296 2010-03-23 12 +296 val_296 2010-03-23 12 +298 val_298 2010-03-23 12 +298 val_298 2010-03-23 12 +298 val_298 2010-03-23 12 +298 val_298 2010-03-23 12 +298 val_298 2010-03-23 12 +298 val_298 2010-03-23 12 +30 val_30 2010-03-23 12 +30 val_30 2010-03-23 12 +302 val_302 2010-03-23 12 +302 val_302 2010-03-23 12 +305 val_305 2010-03-23 12 +305 val_305 2010-03-23 12 +306 val_306 2010-03-23 12 +306 val_306 2010-03-23 12 +307 val_307 2010-03-23 12 +307 val_307 2010-03-23 12 +307 val_307 2010-03-23 12 +307 val_307 2010-03-23 12 +308 val_308 2010-03-23 12 +308 val_308 2010-03-23 12 +309 val_309 2010-03-23 12 +309 val_309 2010-03-23 12 +309 val_309 2010-03-23 12 +309 val_309 2010-03-23 12 +310 val_310 2010-03-23 12 +310 val_310 2010-03-23 12 +311 val_311 2010-03-23 12 +311 val_311 2010-03-23 12 +311 val_311 2010-03-23 12 +311 val_311 2010-03-23 12 +311 val_311 2010-03-23 12 +311 val_311 2010-03-23 12 +315 val_315 2010-03-23 12 +315 val_315 2010-03-23 12 +316 val_316 2010-03-23 12 +316 val_316 2010-03-23 12 +316 val_316 2010-03-23 12 +316 val_316 2010-03-23 12 +316 val_316 2010-03-23 12 +316 val_316 2010-03-23 12 +317 val_317 2010-03-23 12 +317 val_317 2010-03-23 12 +317 val_317 2010-03-23 12 +317 val_317 2010-03-23 12 +318 val_318 2010-03-23 12 +318 val_318 2010-03-23 12 +318 val_318 2010-03-23 12 +318 val_318 2010-03-23 12 +318 val_318 2010-03-23 12 +318 val_318 2010-03-23 12 +321 val_321 2010-03-23 12 +321 val_321 2010-03-23 12 +321 val_321 2010-03-23 12 +321 val_321 2010-03-23 12 +322 val_322 2010-03-23 12 +322 val_322 2010-03-23 12 +322 val_322 2010-03-23 12 +322 val_322 2010-03-23 12 +323 val_323 2010-03-23 12 +323 val_323 2010-03-23 12 +325 val_325 2010-03-23 12 +325 val_325 2010-03-23 12 +325 val_325 2010-03-23 12 +325 val_325 2010-03-23 12 +327 val_327 2010-03-23 12 +327 val_327 2010-03-23 12 +327 val_327 2010-03-23 12 +327 val_327 2010-03-23 12 +327 val_327 2010-03-23 12 +327 val_327 2010-03-23 12 +33 val_33 2010-03-23 12 +33 val_33 2010-03-23 12 +331 val_331 2010-03-23 12 +331 val_331 2010-03-23 12 +331 val_331 2010-03-23 12 +331 val_331 2010-03-23 12 +332 val_332 2010-03-23 12 +332 val_332 2010-03-23 12 +333 val_333 2010-03-23 12 +333 val_333 2010-03-23 12 +333 val_333 2010-03-23 12 +333 val_333 2010-03-23 12 +335 val_335 2010-03-23 12 +335 val_335 2010-03-23 12 +336 val_336 2010-03-23 12 +336 val_336 2010-03-23 12 +338 val_338 2010-03-23 12 +338 val_338 2010-03-23 12 +339 val_339 2010-03-23 12 +339 val_339 2010-03-23 12 +34 val_34 2010-03-23 12 +34 val_34 2010-03-23 12 +341 val_341 2010-03-23 12 +341 val_341 2010-03-23 12 +342 val_342 2010-03-23 12 +342 val_342 2010-03-23 12 +342 val_342 2010-03-23 12 +342 val_342 2010-03-23 12 +344 val_344 2010-03-23 12 +344 val_344 2010-03-23 12 +344 val_344 2010-03-23 12 +344 val_344 2010-03-23 12 +345 val_345 2010-03-23 12 +345 val_345 2010-03-23 12 +348 val_348 2010-03-23 12 +348 val_348 2010-03-23 12 +348 val_348 2010-03-23 12 +348 val_348 2010-03-23 12 +348 val_348 2010-03-23 12 +348 val_348 2010-03-23 12 +348 val_348 2010-03-23 12 +348 val_348 2010-03-23 12 +348 val_348 2010-03-23 12 +348 val_348 2010-03-23 12 +35 val_35 2010-03-23 12 +35 val_35 2010-03-23 12 +35 val_35 2010-03-23 12 +35 val_35 2010-03-23 12 +35 val_35 2010-03-23 12 +35 val_35 2010-03-23 12 +351 val_351 2010-03-23 12 +351 val_351 2010-03-23 12 +353 val_353 2010-03-23 12 +353 val_353 2010-03-23 12 +353 val_353 2010-03-23 12 +353 val_353 2010-03-23 12 +356 val_356 2010-03-23 12 +356 val_356 2010-03-23 12 +360 val_360 
2010-03-23 12 +360 val_360 2010-03-23 12 +362 val_362 2010-03-23 12 +362 val_362 2010-03-23 12 +364 val_364 2010-03-23 12 +364 val_364 2010-03-23 12 +365 val_365 2010-03-23 12 +365 val_365 2010-03-23 12 +366 val_366 2010-03-23 12 +366 val_366 2010-03-23 12 +367 val_367 2010-03-23 12 +367 val_367 2010-03-23 12 +367 val_367 2010-03-23 12 +367 val_367 2010-03-23 12 +368 val_368 2010-03-23 12 +368 val_368 2010-03-23 12 +369 val_369 2010-03-23 12 +369 val_369 2010-03-23 12 +369 val_369 2010-03-23 12 +369 val_369 2010-03-23 12 +369 val_369 2010-03-23 12 +369 val_369 2010-03-23 12 +37 val_37 2010-03-23 12 +37 val_37 2010-03-23 12 +37 val_37 2010-03-23 12 +37 val_37 2010-03-23 12 +373 val_373 2010-03-23 12 +373 val_373 2010-03-23 12 +374 val_374 2010-03-23 12 +374 val_374 2010-03-23 12 +375 val_375 2010-03-23 12 +375 val_375 2010-03-23 12 +377 val_377 2010-03-23 12 +377 val_377 2010-03-23 12 +378 val_378 2010-03-23 12 +378 val_378 2010-03-23 12 +379 val_379 2010-03-23 12 +379 val_379 2010-03-23 12 +382 val_382 2010-03-23 12 +382 val_382 2010-03-23 12 +382 val_382 2010-03-23 12 +382 val_382 2010-03-23 12 +384 val_384 2010-03-23 12 +384 val_384 2010-03-23 12 +384 val_384 2010-03-23 12 +384 val_384 2010-03-23 12 +384 val_384 2010-03-23 12 +384 val_384 2010-03-23 12 +386 val_386 2010-03-23 12 +386 val_386 2010-03-23 12 +389 val_389 2010-03-23 12 +389 val_389 2010-03-23 12 +392 val_392 2010-03-23 12 +392 val_392 2010-03-23 12 +393 val_393 2010-03-23 12 +393 val_393 2010-03-23 12 +394 val_394 2010-03-23 12 +394 val_394 2010-03-23 12 +395 val_395 2010-03-23 12 +395 val_395 2010-03-23 12 +395 val_395 2010-03-23 12 +395 val_395 2010-03-23 12 +396 val_396 2010-03-23 12 +396 val_396 2010-03-23 12 +396 val_396 2010-03-23 12 +396 val_396 2010-03-23 12 +396 val_396 2010-03-23 12 +396 val_396 2010-03-23 12 +397 val_397 2010-03-23 12 +397 val_397 2010-03-23 12 +397 val_397 2010-03-23 12 +397 val_397 2010-03-23 12 +399 val_399 2010-03-23 12 +399 val_399 2010-03-23 12 +399 val_399 2010-03-23 12 +399 val_399 2010-03-23 12 +4 val_4 2010-03-23 12 +4 val_4 2010-03-23 12 +400 val_400 2010-03-23 12 +400 val_400 2010-03-23 12 +401 val_401 2010-03-23 12 +401 val_401 2010-03-23 12 +401 val_401 2010-03-23 12 +401 val_401 2010-03-23 12 +401 val_401 2010-03-23 12 +401 val_401 2010-03-23 12 +401 val_401 2010-03-23 12 +401 val_401 2010-03-23 12 +401 val_401 2010-03-23 12 +401 val_401 2010-03-23 12 +402 val_402 2010-03-23 12 +402 val_402 2010-03-23 12 +403 val_403 2010-03-23 12 +403 val_403 2010-03-23 12 +403 val_403 2010-03-23 12 +403 val_403 2010-03-23 12 +403 val_403 2010-03-23 12 +403 val_403 2010-03-23 12 +404 val_404 2010-03-23 12 +404 val_404 2010-03-23 12 +404 val_404 2010-03-23 12 +404 val_404 2010-03-23 12 +406 val_406 2010-03-23 12 +406 val_406 2010-03-23 12 +406 val_406 2010-03-23 12 +406 val_406 2010-03-23 12 +406 val_406 2010-03-23 12 +406 val_406 2010-03-23 12 +406 val_406 2010-03-23 12 +406 val_406 2010-03-23 12 +407 val_407 2010-03-23 12 +407 val_407 2010-03-23 12 +409 val_409 2010-03-23 12 +409 val_409 2010-03-23 12 +409 val_409 2010-03-23 12 +409 val_409 2010-03-23 12 +409 val_409 2010-03-23 12 +409 val_409 2010-03-23 12 +41 val_41 2010-03-23 12 +41 val_41 2010-03-23 12 +411 val_411 2010-03-23 12 +411 val_411 2010-03-23 12 +413 val_413 2010-03-23 12 +413 val_413 2010-03-23 12 +413 val_413 2010-03-23 12 +413 val_413 2010-03-23 12 +414 val_414 2010-03-23 12 +414 val_414 2010-03-23 12 +414 val_414 2010-03-23 12 +414 val_414 2010-03-23 12 +417 val_417 2010-03-23 12 +417 val_417 2010-03-23 12 +417 val_417 2010-03-23 
12 +417 val_417 2010-03-23 12 +417 val_417 2010-03-23 12 +417 val_417 2010-03-23 12 +418 val_418 2010-03-23 12 +418 val_418 2010-03-23 12 +419 val_419 2010-03-23 12 +419 val_419 2010-03-23 12 +42 val_42 2010-03-23 12 +42 val_42 2010-03-23 12 +42 val_42 2010-03-23 12 +42 val_42 2010-03-23 12 +421 val_421 2010-03-23 12 +421 val_421 2010-03-23 12 +424 val_424 2010-03-23 12 +424 val_424 2010-03-23 12 +424 val_424 2010-03-23 12 +424 val_424 2010-03-23 12 +427 val_427 2010-03-23 12 +427 val_427 2010-03-23 12 +429 val_429 2010-03-23 12 +429 val_429 2010-03-23 12 +429 val_429 2010-03-23 12 +429 val_429 2010-03-23 12 +43 val_43 2010-03-23 12 +43 val_43 2010-03-23 12 +430 val_430 2010-03-23 12 +430 val_430 2010-03-23 12 +430 val_430 2010-03-23 12 +430 val_430 2010-03-23 12 +430 val_430 2010-03-23 12 +430 val_430 2010-03-23 12 +431 val_431 2010-03-23 12 +431 val_431 2010-03-23 12 +431 val_431 2010-03-23 12 +431 val_431 2010-03-23 12 +431 val_431 2010-03-23 12 +431 val_431 2010-03-23 12 +432 val_432 2010-03-23 12 +432 val_432 2010-03-23 12 +435 val_435 2010-03-23 12 +435 val_435 2010-03-23 12 +436 val_436 2010-03-23 12 +436 val_436 2010-03-23 12 +437 val_437 2010-03-23 12 +437 val_437 2010-03-23 12 +438 val_438 2010-03-23 12 +438 val_438 2010-03-23 12 +438 val_438 2010-03-23 12 +438 val_438 2010-03-23 12 +438 val_438 2010-03-23 12 +438 val_438 2010-03-23 12 +439 val_439 2010-03-23 12 +439 val_439 2010-03-23 12 +439 val_439 2010-03-23 12 +439 val_439 2010-03-23 12 +44 val_44 2010-03-23 12 +44 val_44 2010-03-23 12 +443 val_443 2010-03-23 12 +443 val_443 2010-03-23 12 +444 val_444 2010-03-23 12 +444 val_444 2010-03-23 12 +446 val_446 2010-03-23 12 +446 val_446 2010-03-23 12 +448 val_448 2010-03-23 12 +448 val_448 2010-03-23 12 +449 val_449 2010-03-23 12 +449 val_449 2010-03-23 12 +452 val_452 2010-03-23 12 +452 val_452 2010-03-23 12 +453 val_453 2010-03-23 12 +453 val_453 2010-03-23 12 +454 val_454 2010-03-23 12 +454 val_454 2010-03-23 12 +454 val_454 2010-03-23 12 +454 val_454 2010-03-23 12 +454 val_454 2010-03-23 12 +454 val_454 2010-03-23 12 +455 val_455 2010-03-23 12 +455 val_455 2010-03-23 12 +457 val_457 2010-03-23 12 +457 val_457 2010-03-23 12 +458 val_458 2010-03-23 12 +458 val_458 2010-03-23 12 +458 val_458 2010-03-23 12 +458 val_458 2010-03-23 12 +459 val_459 2010-03-23 12 +459 val_459 2010-03-23 12 +459 val_459 2010-03-23 12 +459 val_459 2010-03-23 12 +460 val_460 2010-03-23 12 +460 val_460 2010-03-23 12 +462 val_462 2010-03-23 12 +462 val_462 2010-03-23 12 +462 val_462 2010-03-23 12 +462 val_462 2010-03-23 12 +463 val_463 2010-03-23 12 +463 val_463 2010-03-23 12 +463 val_463 2010-03-23 12 +463 val_463 2010-03-23 12 +466 val_466 2010-03-23 12 +466 val_466 2010-03-23 12 +466 val_466 2010-03-23 12 +466 val_466 2010-03-23 12 +466 val_466 2010-03-23 12 +466 val_466 2010-03-23 12 +467 val_467 2010-03-23 12 +467 val_467 2010-03-23 12 +468 val_468 2010-03-23 12 +468 val_468 2010-03-23 12 +468 val_468 2010-03-23 12 +468 val_468 2010-03-23 12 +468 val_468 2010-03-23 12 +468 val_468 2010-03-23 12 +468 val_468 2010-03-23 12 +468 val_468 2010-03-23 12 +469 val_469 2010-03-23 12 +469 val_469 2010-03-23 12 +469 val_469 2010-03-23 12 +469 val_469 2010-03-23 12 +469 val_469 2010-03-23 12 +469 val_469 2010-03-23 12 +469 val_469 2010-03-23 12 +469 val_469 2010-03-23 12 +469 val_469 2010-03-23 12 +469 val_469 2010-03-23 12 +47 val_47 2010-03-23 12 +47 val_47 2010-03-23 12 +470 val_470 2010-03-23 12 +470 val_470 2010-03-23 12 +472 val_472 2010-03-23 12 +472 val_472 2010-03-23 12 +475 val_475 2010-03-23 12 +475 
val_475 2010-03-23 12 +477 val_477 2010-03-23 12 +477 val_477 2010-03-23 12 +478 val_478 2010-03-23 12 +478 val_478 2010-03-23 12 +478 val_478 2010-03-23 12 +478 val_478 2010-03-23 12 +479 val_479 2010-03-23 12 +479 val_479 2010-03-23 12 +480 val_480 2010-03-23 12 +480 val_480 2010-03-23 12 +480 val_480 2010-03-23 12 +480 val_480 2010-03-23 12 +480 val_480 2010-03-23 12 +480 val_480 2010-03-23 12 +481 val_481 2010-03-23 12 +481 val_481 2010-03-23 12 +482 val_482 2010-03-23 12 +482 val_482 2010-03-23 12 +483 val_483 2010-03-23 12 +483 val_483 2010-03-23 12 +484 val_484 2010-03-23 12 +484 val_484 2010-03-23 12 +485 val_485 2010-03-23 12 +485 val_485 2010-03-23 12 +487 val_487 2010-03-23 12 +487 val_487 2010-03-23 12 +489 val_489 2010-03-23 12 +489 val_489 2010-03-23 12 +489 val_489 2010-03-23 12 +489 val_489 2010-03-23 12 +489 val_489 2010-03-23 12 +489 val_489 2010-03-23 12 +489 val_489 2010-03-23 12 +489 val_489 2010-03-23 12 +490 val_490 2010-03-23 12 +490 val_490 2010-03-23 12 +491 val_491 2010-03-23 12 +491 val_491 2010-03-23 12 +492 val_492 2010-03-23 12 +492 val_492 2010-03-23 12 +492 val_492 2010-03-23 12 +492 val_492 2010-03-23 12 +493 val_493 2010-03-23 12 +493 val_493 2010-03-23 12 +494 val_494 2010-03-23 12 +494 val_494 2010-03-23 12 +495 val_495 2010-03-23 12 +495 val_495 2010-03-23 12 +496 val_496 2010-03-23 12 +496 val_496 2010-03-23 12 +497 val_497 2010-03-23 12 +497 val_497 2010-03-23 12 +498 val_498 2010-03-23 12 +498 val_498 2010-03-23 12 +498 val_498 2010-03-23 12 +498 val_498 2010-03-23 12 +498 val_498 2010-03-23 12 +498 val_498 2010-03-23 12 +5 val_5 2010-03-23 12 +5 val_5 2010-03-23 12 +5 val_5 2010-03-23 12 +5 val_5 2010-03-23 12 +5 val_5 2010-03-23 12 +5 val_5 2010-03-23 12 +51 val_51 2010-03-23 12 +51 val_51 2010-03-23 12 +51 val_51 2010-03-23 12 +51 val_51 2010-03-23 12 +53 val_53 2010-03-23 12 +53 val_53 2010-03-23 12 +54 val_54 2010-03-23 12 +54 val_54 2010-03-23 12 +57 val_57 2010-03-23 12 +57 val_57 2010-03-23 12 +58 val_58 2010-03-23 12 +58 val_58 2010-03-23 12 +58 val_58 2010-03-23 12 +58 val_58 2010-03-23 12 +64 val_64 2010-03-23 12 +64 val_64 2010-03-23 12 +65 val_65 2010-03-23 12 +65 val_65 2010-03-23 12 +66 val_66 2010-03-23 12 +66 val_66 2010-03-23 12 +67 val_67 2010-03-23 12 +67 val_67 2010-03-23 12 +67 val_67 2010-03-23 12 +67 val_67 2010-03-23 12 +69 val_69 2010-03-23 12 +69 val_69 2010-03-23 12 +70 val_70 2010-03-23 12 +70 val_70 2010-03-23 12 +70 val_70 2010-03-23 12 +70 val_70 2010-03-23 12 +70 val_70 2010-03-23 12 +70 val_70 2010-03-23 12 +72 val_72 2010-03-23 12 +72 val_72 2010-03-23 12 +72 val_72 2010-03-23 12 +72 val_72 2010-03-23 12 +74 val_74 2010-03-23 12 +74 val_74 2010-03-23 12 +76 val_76 2010-03-23 12 +76 val_76 2010-03-23 12 +76 val_76 2010-03-23 12 +76 val_76 2010-03-23 12 +77 val_77 2010-03-23 12 +77 val_77 2010-03-23 12 +78 val_78 2010-03-23 12 +78 val_78 2010-03-23 12 +8 val_8 2010-03-23 12 +8 val_8 2010-03-23 12 +80 val_80 2010-03-23 12 +80 val_80 2010-03-23 12 +82 val_82 2010-03-23 12 +82 val_82 2010-03-23 12 +83 val_83 2010-03-23 12 +83 val_83 2010-03-23 12 +83 val_83 2010-03-23 12 +83 val_83 2010-03-23 12 +84 val_84 2010-03-23 12 +84 val_84 2010-03-23 12 +84 val_84 2010-03-23 12 +84 val_84 2010-03-23 12 +85 val_85 2010-03-23 12 +85 val_85 2010-03-23 12 +86 val_86 2010-03-23 12 +86 val_86 2010-03-23 12 +87 val_87 2010-03-23 12 +87 val_87 2010-03-23 12 +9 val_9 2010-03-23 12 +9 val_9 2010-03-23 12 +90 val_90 2010-03-23 12 +90 val_90 2010-03-23 12 +90 val_90 2010-03-23 12 +90 val_90 2010-03-23 12 +90 val_90 2010-03-23 12 +90 val_90 
2010-03-23 12 +92 val_92 2010-03-23 12 +92 val_92 2010-03-23 12 +95 val_95 2010-03-23 12 +95 val_95 2010-03-23 12 +95 val_95 2010-03-23 12 +95 val_95 2010-03-23 12 +96 val_96 2010-03-23 12 +96 val_96 2010-03-23 12 +97 val_97 2010-03-23 12 +97 val_97 2010-03-23 12 +97 val_97 2010-03-23 12 +97 val_97 2010-03-23 12 +98 val_98 2010-03-23 12 +98 val_98 2010-03-23 12 +98 val_98 2010-03-23 12 +98 val_98 2010-03-23 12 Index: ql/src/test/results/clientpositive/tez/load_dyn_part3.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/load_dyn_part3.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/load_dyn_part3.q.out (working copy) @@ -0,0 +1,2141 @@ +PREHOOK: query: show partitions srcpart +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: show partitions srcpart +POSTHOOK: type: SHOWPARTITIONS +ds=2008-04-08/hr=11 +ds=2008-04-08/hr=12 +ds=2008-04-09/hr=11 +ds=2008-04-09/hr=12 +PREHOOK: query: create table if not exists nzhang_part3 like srcpart +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table if not exists nzhang_part3 like srcpart +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@nzhang_part3 +PREHOOK: query: describe extended nzhang_part3 +PREHOOK: type: DESCTABLE +POSTHOOK: query: describe extended nzhang_part3 +POSTHOOK: type: DESCTABLE +key string default +value string default +ds string None +hr string None + +# Partition Information +# col_name data_type comment + +ds string None +hr string None + +#### A masked pattern was here #### +PREHOOK: query: explain +insert overwrite table nzhang_part3 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table nzhang_part3 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME nzhang_part3) (TOK_PARTSPEC (TOK_PARTVAL ds) (TOK_PARTVAL hr)))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value)) (TOK_SELEXPR (TOK_TABLE_OR_COL ds)) (TOK_SELEXPR (TOK_TABLE_OR_COL hr))) (TOK_WHERE (and (TOK_FUNCTION TOK_ISNOTNULL (TOK_TABLE_OR_COL ds)) (TOK_FUNCTION TOK_ISNOTNULL (TOK_TABLE_OR_COL hr)))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + srcpart + TableScan + alias: srcpart + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + expr: ds + type: string + expr: hr + type: string + outputColumnNames: _col0, _col1, _col2, _col3 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part3 + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ds + hr + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part3 + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: 
query: insert overwrite table nzhang_part3 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@nzhang_part3 +POSTHOOK: query: insert overwrite table nzhang_part3 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@nzhang_part3@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@nzhang_part3@ds=2008-04-08/hr=12 +POSTHOOK: Output: default@nzhang_part3@ds=2008-04-09/hr=11 +POSTHOOK: Output: default@nzhang_part3@ds=2008-04-09/hr=12 +POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from nzhang_part3 where ds is not null and hr is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@nzhang_part3 +PREHOOK: Input: default@nzhang_part3@ds=2008-04-08/hr=11 +PREHOOK: Input: default@nzhang_part3@ds=2008-04-08/hr=12 +PREHOOK: Input: default@nzhang_part3@ds=2008-04-09/hr=11 +PREHOOK: Input: default@nzhang_part3@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select * from nzhang_part3 where ds is not null and hr is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@nzhang_part3 +POSTHOOK: Input: default@nzhang_part3@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@nzhang_part3@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@nzhang_part3@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@nzhang_part3@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-08,hr=12).key SIMPLE 
[(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +238 val_238 2008-04-08 11 +86 val_86 2008-04-08 11 +311 val_311 2008-04-08 11 +27 val_27 2008-04-08 11 +165 val_165 2008-04-08 11 +409 val_409 2008-04-08 11 +255 val_255 2008-04-08 11 +278 val_278 2008-04-08 11 +98 val_98 2008-04-08 11 +484 val_484 2008-04-08 11 +265 val_265 2008-04-08 11 +193 val_193 2008-04-08 11 +401 val_401 2008-04-08 11 +150 val_150 2008-04-08 11 +273 val_273 2008-04-08 11 +224 val_224 2008-04-08 11 +369 val_369 2008-04-08 11 +66 val_66 2008-04-08 11 +128 val_128 2008-04-08 11 +213 val_213 2008-04-08 11 +146 val_146 2008-04-08 11 +406 val_406 2008-04-08 11 +429 val_429 2008-04-08 11 +374 val_374 2008-04-08 11 +152 val_152 2008-04-08 11 +469 val_469 2008-04-08 11 +145 val_145 2008-04-08 11 +495 val_495 2008-04-08 11 +37 val_37 2008-04-08 11 +327 val_327 2008-04-08 11 +281 val_281 2008-04-08 11 +277 val_277 2008-04-08 11 +209 val_209 2008-04-08 11 +15 val_15 2008-04-08 11 +82 val_82 2008-04-08 11 +403 val_403 2008-04-08 11 +166 val_166 2008-04-08 11 +417 val_417 2008-04-08 11 +430 val_430 2008-04-08 11 +252 val_252 2008-04-08 11 +292 val_292 2008-04-08 11 +219 val_219 2008-04-08 11 +287 val_287 2008-04-08 11 +153 val_153 2008-04-08 11 +193 val_193 2008-04-08 11 +338 val_338 2008-04-08 11 +446 val_446 2008-04-08 11 +459 val_459 2008-04-08 11 +394 val_394 2008-04-08 11 +237 val_237 2008-04-08 11 +482 val_482 2008-04-08 11 +174 val_174 2008-04-08 11 +413 val_413 2008-04-08 11 +494 val_494 2008-04-08 11 +207 val_207 2008-04-08 11 +199 val_199 2008-04-08 11 +466 val_466 2008-04-08 11 +208 val_208 2008-04-08 11 +174 val_174 2008-04-08 11 +399 val_399 2008-04-08 11 +396 val_396 2008-04-08 11 +247 val_247 2008-04-08 11 +417 val_417 2008-04-08 11 +489 val_489 2008-04-08 11 +162 val_162 2008-04-08 11 +377 val_377 2008-04-08 11 +397 val_397 2008-04-08 11 +309 val_309 2008-04-08 11 +365 val_365 2008-04-08 11 +266 val_266 2008-04-08 11 +439 val_439 2008-04-08 11 +342 val_342 2008-04-08 11 +367 val_367 2008-04-08 11 +325 val_325 2008-04-08 11 +167 val_167 2008-04-08 11 +195 val_195 2008-04-08 11 +475 val_475 2008-04-08 11 +17 val_17 2008-04-08 11 +113 val_113 2008-04-08 11 +155 val_155 2008-04-08 11 +203 val_203 2008-04-08 11 +339 val_339 2008-04-08 11 +0 val_0 2008-04-08 11 +455 val_455 2008-04-08 11 +128 val_128 2008-04-08 11 +311 val_311 2008-04-08 11 +316 val_316 2008-04-08 11 +57 val_57 2008-04-08 11 +302 val_302 2008-04-08 11 +205 val_205 2008-04-08 11 +149 val_149 2008-04-08 11 +438 val_438 2008-04-08 11 +345 val_345 2008-04-08 11 +129 val_129 2008-04-08 11 +170 val_170 2008-04-08 11 +20 val_20 2008-04-08 11 +489 val_489 2008-04-08 11 +157 val_157 2008-04-08 11 +378 val_378 2008-04-08 11 +221 val_221 2008-04-08 11 +92 val_92 2008-04-08 11 +111 val_111 2008-04-08 11 
+47 val_47 2008-04-08 11 +72 val_72 2008-04-08 11 +4 val_4 2008-04-08 11 +280 val_280 2008-04-08 11 +35 val_35 2008-04-08 11 +427 val_427 2008-04-08 11 +277 val_277 2008-04-08 11 +208 val_208 2008-04-08 11 +356 val_356 2008-04-08 11 +399 val_399 2008-04-08 11 +169 val_169 2008-04-08 11 +382 val_382 2008-04-08 11 +498 val_498 2008-04-08 11 +125 val_125 2008-04-08 11 +386 val_386 2008-04-08 11 +437 val_437 2008-04-08 11 +469 val_469 2008-04-08 11 +192 val_192 2008-04-08 11 +286 val_286 2008-04-08 11 +187 val_187 2008-04-08 11 +176 val_176 2008-04-08 11 +54 val_54 2008-04-08 11 +459 val_459 2008-04-08 11 +51 val_51 2008-04-08 11 +138 val_138 2008-04-08 11 +103 val_103 2008-04-08 11 +239 val_239 2008-04-08 11 +213 val_213 2008-04-08 11 +216 val_216 2008-04-08 11 +430 val_430 2008-04-08 11 +278 val_278 2008-04-08 11 +176 val_176 2008-04-08 11 +289 val_289 2008-04-08 11 +221 val_221 2008-04-08 11 +65 val_65 2008-04-08 11 +318 val_318 2008-04-08 11 +332 val_332 2008-04-08 11 +311 val_311 2008-04-08 11 +275 val_275 2008-04-08 11 +137 val_137 2008-04-08 11 +241 val_241 2008-04-08 11 +83 val_83 2008-04-08 11 +333 val_333 2008-04-08 11 +180 val_180 2008-04-08 11 +284 val_284 2008-04-08 11 +12 val_12 2008-04-08 11 +230 val_230 2008-04-08 11 +181 val_181 2008-04-08 11 +67 val_67 2008-04-08 11 +260 val_260 2008-04-08 11 +404 val_404 2008-04-08 11 +384 val_384 2008-04-08 11 +489 val_489 2008-04-08 11 +353 val_353 2008-04-08 11 +373 val_373 2008-04-08 11 +272 val_272 2008-04-08 11 +138 val_138 2008-04-08 11 +217 val_217 2008-04-08 11 +84 val_84 2008-04-08 11 +348 val_348 2008-04-08 11 +466 val_466 2008-04-08 11 +58 val_58 2008-04-08 11 +8 val_8 2008-04-08 11 +411 val_411 2008-04-08 11 +230 val_230 2008-04-08 11 +208 val_208 2008-04-08 11 +348 val_348 2008-04-08 11 +24 val_24 2008-04-08 11 +463 val_463 2008-04-08 11 +431 val_431 2008-04-08 11 +179 val_179 2008-04-08 11 +172 val_172 2008-04-08 11 +42 val_42 2008-04-08 11 +129 val_129 2008-04-08 11 +158 val_158 2008-04-08 11 +119 val_119 2008-04-08 11 +496 val_496 2008-04-08 11 +0 val_0 2008-04-08 11 +322 val_322 2008-04-08 11 +197 val_197 2008-04-08 11 +468 val_468 2008-04-08 11 +393 val_393 2008-04-08 11 +454 val_454 2008-04-08 11 +100 val_100 2008-04-08 11 +298 val_298 2008-04-08 11 +199 val_199 2008-04-08 11 +191 val_191 2008-04-08 11 +418 val_418 2008-04-08 11 +96 val_96 2008-04-08 11 +26 val_26 2008-04-08 11 +165 val_165 2008-04-08 11 +327 val_327 2008-04-08 11 +230 val_230 2008-04-08 11 +205 val_205 2008-04-08 11 +120 val_120 2008-04-08 11 +131 val_131 2008-04-08 11 +51 val_51 2008-04-08 11 +404 val_404 2008-04-08 11 +43 val_43 2008-04-08 11 +436 val_436 2008-04-08 11 +156 val_156 2008-04-08 11 +469 val_469 2008-04-08 11 +468 val_468 2008-04-08 11 +308 val_308 2008-04-08 11 +95 val_95 2008-04-08 11 +196 val_196 2008-04-08 11 +288 val_288 2008-04-08 11 +481 val_481 2008-04-08 11 +457 val_457 2008-04-08 11 +98 val_98 2008-04-08 11 +282 val_282 2008-04-08 11 +197 val_197 2008-04-08 11 +187 val_187 2008-04-08 11 +318 val_318 2008-04-08 11 +318 val_318 2008-04-08 11 +409 val_409 2008-04-08 11 +470 val_470 2008-04-08 11 +137 val_137 2008-04-08 11 +369 val_369 2008-04-08 11 +316 val_316 2008-04-08 11 +169 val_169 2008-04-08 11 +413 val_413 2008-04-08 11 +85 val_85 2008-04-08 11 +77 val_77 2008-04-08 11 +0 val_0 2008-04-08 11 +490 val_490 2008-04-08 11 +87 val_87 2008-04-08 11 +364 val_364 2008-04-08 11 +179 val_179 2008-04-08 11 +118 val_118 2008-04-08 11 +134 val_134 2008-04-08 11 +395 val_395 2008-04-08 11 +282 val_282 2008-04-08 11 +138 val_138 2008-04-08 
11 +238 val_238 2008-04-08 11 +419 val_419 2008-04-08 11 +15 val_15 2008-04-08 11 +118 val_118 2008-04-08 11 +72 val_72 2008-04-08 11 +90 val_90 2008-04-08 11 +307 val_307 2008-04-08 11 +19 val_19 2008-04-08 11 +435 val_435 2008-04-08 11 +10 val_10 2008-04-08 11 +277 val_277 2008-04-08 11 +273 val_273 2008-04-08 11 +306 val_306 2008-04-08 11 +224 val_224 2008-04-08 11 +309 val_309 2008-04-08 11 +389 val_389 2008-04-08 11 +327 val_327 2008-04-08 11 +242 val_242 2008-04-08 11 +369 val_369 2008-04-08 11 +392 val_392 2008-04-08 11 +272 val_272 2008-04-08 11 +331 val_331 2008-04-08 11 +401 val_401 2008-04-08 11 +242 val_242 2008-04-08 11 +452 val_452 2008-04-08 11 +177 val_177 2008-04-08 11 +226 val_226 2008-04-08 11 +5 val_5 2008-04-08 11 +497 val_497 2008-04-08 11 +402 val_402 2008-04-08 11 +396 val_396 2008-04-08 11 +317 val_317 2008-04-08 11 +395 val_395 2008-04-08 11 +58 val_58 2008-04-08 11 +35 val_35 2008-04-08 11 +336 val_336 2008-04-08 11 +95 val_95 2008-04-08 11 +11 val_11 2008-04-08 11 +168 val_168 2008-04-08 11 +34 val_34 2008-04-08 11 +229 val_229 2008-04-08 11 +233 val_233 2008-04-08 11 +143 val_143 2008-04-08 11 +472 val_472 2008-04-08 11 +322 val_322 2008-04-08 11 +498 val_498 2008-04-08 11 +160 val_160 2008-04-08 11 +195 val_195 2008-04-08 11 +42 val_42 2008-04-08 11 +321 val_321 2008-04-08 11 +430 val_430 2008-04-08 11 +119 val_119 2008-04-08 11 +489 val_489 2008-04-08 11 +458 val_458 2008-04-08 11 +78 val_78 2008-04-08 11 +76 val_76 2008-04-08 11 +41 val_41 2008-04-08 11 +223 val_223 2008-04-08 11 +492 val_492 2008-04-08 11 +149 val_149 2008-04-08 11 +449 val_449 2008-04-08 11 +218 val_218 2008-04-08 11 +228 val_228 2008-04-08 11 +138 val_138 2008-04-08 11 +453 val_453 2008-04-08 11 +30 val_30 2008-04-08 11 +209 val_209 2008-04-08 11 +64 val_64 2008-04-08 11 +468 val_468 2008-04-08 11 +76 val_76 2008-04-08 11 +74 val_74 2008-04-08 11 +342 val_342 2008-04-08 11 +69 val_69 2008-04-08 11 +230 val_230 2008-04-08 11 +33 val_33 2008-04-08 11 +368 val_368 2008-04-08 11 +103 val_103 2008-04-08 11 +296 val_296 2008-04-08 11 +113 val_113 2008-04-08 11 +216 val_216 2008-04-08 11 +367 val_367 2008-04-08 11 +344 val_344 2008-04-08 11 +167 val_167 2008-04-08 11 +274 val_274 2008-04-08 11 +219 val_219 2008-04-08 11 +239 val_239 2008-04-08 11 +485 val_485 2008-04-08 11 +116 val_116 2008-04-08 11 +223 val_223 2008-04-08 11 +256 val_256 2008-04-08 11 +263 val_263 2008-04-08 11 +70 val_70 2008-04-08 11 +487 val_487 2008-04-08 11 +480 val_480 2008-04-08 11 +401 val_401 2008-04-08 11 +288 val_288 2008-04-08 11 +191 val_191 2008-04-08 11 +5 val_5 2008-04-08 11 +244 val_244 2008-04-08 11 +438 val_438 2008-04-08 11 +128 val_128 2008-04-08 11 +467 val_467 2008-04-08 11 +432 val_432 2008-04-08 11 +202 val_202 2008-04-08 11 +316 val_316 2008-04-08 11 +229 val_229 2008-04-08 11 +469 val_469 2008-04-08 11 +463 val_463 2008-04-08 11 +280 val_280 2008-04-08 11 +2 val_2 2008-04-08 11 +35 val_35 2008-04-08 11 +283 val_283 2008-04-08 11 +331 val_331 2008-04-08 11 +235 val_235 2008-04-08 11 +80 val_80 2008-04-08 11 +44 val_44 2008-04-08 11 +193 val_193 2008-04-08 11 +321 val_321 2008-04-08 11 +335 val_335 2008-04-08 11 +104 val_104 2008-04-08 11 +466 val_466 2008-04-08 11 +366 val_366 2008-04-08 11 +175 val_175 2008-04-08 11 +403 val_403 2008-04-08 11 +483 val_483 2008-04-08 11 +53 val_53 2008-04-08 11 +105 val_105 2008-04-08 11 +257 val_257 2008-04-08 11 +406 val_406 2008-04-08 11 +409 val_409 2008-04-08 11 +190 val_190 2008-04-08 11 +406 val_406 2008-04-08 11 +401 val_401 2008-04-08 11 +114 val_114 
2008-04-08 11 +258 val_258 2008-04-08 11 +90 val_90 2008-04-08 11 +203 val_203 2008-04-08 11 +262 val_262 2008-04-08 11 +348 val_348 2008-04-08 11 +424 val_424 2008-04-08 11 +12 val_12 2008-04-08 11 +396 val_396 2008-04-08 11 +201 val_201 2008-04-08 11 +217 val_217 2008-04-08 11 +164 val_164 2008-04-08 11 +431 val_431 2008-04-08 11 +454 val_454 2008-04-08 11 +478 val_478 2008-04-08 11 +298 val_298 2008-04-08 11 +125 val_125 2008-04-08 11 +431 val_431 2008-04-08 11 +164 val_164 2008-04-08 11 +424 val_424 2008-04-08 11 +187 val_187 2008-04-08 11 +382 val_382 2008-04-08 11 +5 val_5 2008-04-08 11 +70 val_70 2008-04-08 11 +397 val_397 2008-04-08 11 +480 val_480 2008-04-08 11 +291 val_291 2008-04-08 11 +24 val_24 2008-04-08 11 +351 val_351 2008-04-08 11 +255 val_255 2008-04-08 11 +104 val_104 2008-04-08 11 +70 val_70 2008-04-08 11 +163 val_163 2008-04-08 11 +438 val_438 2008-04-08 11 +119 val_119 2008-04-08 11 +414 val_414 2008-04-08 11 +200 val_200 2008-04-08 11 +491 val_491 2008-04-08 11 +237 val_237 2008-04-08 11 +439 val_439 2008-04-08 11 +360 val_360 2008-04-08 11 +248 val_248 2008-04-08 11 +479 val_479 2008-04-08 11 +305 val_305 2008-04-08 11 +417 val_417 2008-04-08 11 +199 val_199 2008-04-08 11 +444 val_444 2008-04-08 11 +120 val_120 2008-04-08 11 +429 val_429 2008-04-08 11 +169 val_169 2008-04-08 11 +443 val_443 2008-04-08 11 +323 val_323 2008-04-08 11 +325 val_325 2008-04-08 11 +277 val_277 2008-04-08 11 +230 val_230 2008-04-08 11 +478 val_478 2008-04-08 11 +178 val_178 2008-04-08 11 +468 val_468 2008-04-08 11 +310 val_310 2008-04-08 11 +317 val_317 2008-04-08 11 +333 val_333 2008-04-08 11 +493 val_493 2008-04-08 11 +460 val_460 2008-04-08 11 +207 val_207 2008-04-08 11 +249 val_249 2008-04-08 11 +265 val_265 2008-04-08 11 +480 val_480 2008-04-08 11 +83 val_83 2008-04-08 11 +136 val_136 2008-04-08 11 +353 val_353 2008-04-08 11 +172 val_172 2008-04-08 11 +214 val_214 2008-04-08 11 +462 val_462 2008-04-08 11 +233 val_233 2008-04-08 11 +406 val_406 2008-04-08 11 +133 val_133 2008-04-08 11 +175 val_175 2008-04-08 11 +189 val_189 2008-04-08 11 +454 val_454 2008-04-08 11 +375 val_375 2008-04-08 11 +401 val_401 2008-04-08 11 +421 val_421 2008-04-08 11 +407 val_407 2008-04-08 11 +384 val_384 2008-04-08 11 +256 val_256 2008-04-08 11 +26 val_26 2008-04-08 11 +134 val_134 2008-04-08 11 +67 val_67 2008-04-08 11 +384 val_384 2008-04-08 11 +379 val_379 2008-04-08 11 +18 val_18 2008-04-08 11 +462 val_462 2008-04-08 11 +492 val_492 2008-04-08 11 +100 val_100 2008-04-08 11 +298 val_298 2008-04-08 11 +9 val_9 2008-04-08 11 +341 val_341 2008-04-08 11 +498 val_498 2008-04-08 11 +146 val_146 2008-04-08 11 +458 val_458 2008-04-08 11 +362 val_362 2008-04-08 11 +186 val_186 2008-04-08 11 +285 val_285 2008-04-08 11 +348 val_348 2008-04-08 11 +167 val_167 2008-04-08 11 +18 val_18 2008-04-08 11 +273 val_273 2008-04-08 11 +183 val_183 2008-04-08 11 +281 val_281 2008-04-08 11 +344 val_344 2008-04-08 11 +97 val_97 2008-04-08 11 +469 val_469 2008-04-08 11 +315 val_315 2008-04-08 11 +84 val_84 2008-04-08 11 +28 val_28 2008-04-08 11 +37 val_37 2008-04-08 11 +448 val_448 2008-04-08 11 +152 val_152 2008-04-08 11 +348 val_348 2008-04-08 11 +307 val_307 2008-04-08 11 +194 val_194 2008-04-08 11 +414 val_414 2008-04-08 11 +477 val_477 2008-04-08 11 +222 val_222 2008-04-08 11 +126 val_126 2008-04-08 11 +90 val_90 2008-04-08 11 +169 val_169 2008-04-08 11 +403 val_403 2008-04-08 11 +400 val_400 2008-04-08 11 +200 val_200 2008-04-08 11 +97 val_97 2008-04-08 11 +238 val_238 2008-04-08 12 +86 val_86 2008-04-08 12 +311 val_311 
2008-04-08 12 +27 val_27 2008-04-08 12 +165 val_165 2008-04-08 12 +409 val_409 2008-04-08 12 +255 val_255 2008-04-08 12 +278 val_278 2008-04-08 12 +98 val_98 2008-04-08 12 +484 val_484 2008-04-08 12 +265 val_265 2008-04-08 12 +193 val_193 2008-04-08 12 +401 val_401 2008-04-08 12 +150 val_150 2008-04-08 12 +273 val_273 2008-04-08 12 +224 val_224 2008-04-08 12 +369 val_369 2008-04-08 12 +66 val_66 2008-04-08 12 +128 val_128 2008-04-08 12 +213 val_213 2008-04-08 12 +146 val_146 2008-04-08 12 +406 val_406 2008-04-08 12 +429 val_429 2008-04-08 12 +374 val_374 2008-04-08 12 +152 val_152 2008-04-08 12 +469 val_469 2008-04-08 12 +145 val_145 2008-04-08 12 +495 val_495 2008-04-08 12 +37 val_37 2008-04-08 12 +327 val_327 2008-04-08 12 +281 val_281 2008-04-08 12 +277 val_277 2008-04-08 12 +209 val_209 2008-04-08 12 +15 val_15 2008-04-08 12 +82 val_82 2008-04-08 12 +403 val_403 2008-04-08 12 +166 val_166 2008-04-08 12 +417 val_417 2008-04-08 12 +430 val_430 2008-04-08 12 +252 val_252 2008-04-08 12 +292 val_292 2008-04-08 12 +219 val_219 2008-04-08 12 +287 val_287 2008-04-08 12 +153 val_153 2008-04-08 12 +193 val_193 2008-04-08 12 +338 val_338 2008-04-08 12 +446 val_446 2008-04-08 12 +459 val_459 2008-04-08 12 +394 val_394 2008-04-08 12 +237 val_237 2008-04-08 12 +482 val_482 2008-04-08 12 +174 val_174 2008-04-08 12 +413 val_413 2008-04-08 12 +494 val_494 2008-04-08 12 +207 val_207 2008-04-08 12 +199 val_199 2008-04-08 12 +466 val_466 2008-04-08 12 +208 val_208 2008-04-08 12 +174 val_174 2008-04-08 12 +399 val_399 2008-04-08 12 +396 val_396 2008-04-08 12 +247 val_247 2008-04-08 12 +417 val_417 2008-04-08 12 +489 val_489 2008-04-08 12 +162 val_162 2008-04-08 12 +377 val_377 2008-04-08 12 +397 val_397 2008-04-08 12 +309 val_309 2008-04-08 12 +365 val_365 2008-04-08 12 +266 val_266 2008-04-08 12 +439 val_439 2008-04-08 12 +342 val_342 2008-04-08 12 +367 val_367 2008-04-08 12 +325 val_325 2008-04-08 12 +167 val_167 2008-04-08 12 +195 val_195 2008-04-08 12 +475 val_475 2008-04-08 12 +17 val_17 2008-04-08 12 +113 val_113 2008-04-08 12 +155 val_155 2008-04-08 12 +203 val_203 2008-04-08 12 +339 val_339 2008-04-08 12 +0 val_0 2008-04-08 12 +455 val_455 2008-04-08 12 +128 val_128 2008-04-08 12 +311 val_311 2008-04-08 12 +316 val_316 2008-04-08 12 +57 val_57 2008-04-08 12 +302 val_302 2008-04-08 12 +205 val_205 2008-04-08 12 +149 val_149 2008-04-08 12 +438 val_438 2008-04-08 12 +345 val_345 2008-04-08 12 +129 val_129 2008-04-08 12 +170 val_170 2008-04-08 12 +20 val_20 2008-04-08 12 +489 val_489 2008-04-08 12 +157 val_157 2008-04-08 12 +378 val_378 2008-04-08 12 +221 val_221 2008-04-08 12 +92 val_92 2008-04-08 12 +111 val_111 2008-04-08 12 +47 val_47 2008-04-08 12 +72 val_72 2008-04-08 12 +4 val_4 2008-04-08 12 +280 val_280 2008-04-08 12 +35 val_35 2008-04-08 12 +427 val_427 2008-04-08 12 +277 val_277 2008-04-08 12 +208 val_208 2008-04-08 12 +356 val_356 2008-04-08 12 +399 val_399 2008-04-08 12 +169 val_169 2008-04-08 12 +382 val_382 2008-04-08 12 +498 val_498 2008-04-08 12 +125 val_125 2008-04-08 12 +386 val_386 2008-04-08 12 +437 val_437 2008-04-08 12 +469 val_469 2008-04-08 12 +192 val_192 2008-04-08 12 +286 val_286 2008-04-08 12 +187 val_187 2008-04-08 12 +176 val_176 2008-04-08 12 +54 val_54 2008-04-08 12 +459 val_459 2008-04-08 12 +51 val_51 2008-04-08 12 +138 val_138 2008-04-08 12 +103 val_103 2008-04-08 12 +239 val_239 2008-04-08 12 +213 val_213 2008-04-08 12 +216 val_216 2008-04-08 12 +430 val_430 2008-04-08 12 +278 val_278 2008-04-08 12 +176 val_176 2008-04-08 12 +289 val_289 2008-04-08 12 +221 val_221 
2008-04-08 12 +65 val_65 2008-04-08 12 +318 val_318 2008-04-08 12 +332 val_332 2008-04-08 12 +311 val_311 2008-04-08 12 +275 val_275 2008-04-08 12 +137 val_137 2008-04-08 12 +241 val_241 2008-04-08 12 +83 val_83 2008-04-08 12 +333 val_333 2008-04-08 12 +180 val_180 2008-04-08 12 +284 val_284 2008-04-08 12 +12 val_12 2008-04-08 12 +230 val_230 2008-04-08 12 +181 val_181 2008-04-08 12 +67 val_67 2008-04-08 12 +260 val_260 2008-04-08 12 +404 val_404 2008-04-08 12 +384 val_384 2008-04-08 12 +489 val_489 2008-04-08 12 +353 val_353 2008-04-08 12 +373 val_373 2008-04-08 12 +272 val_272 2008-04-08 12 +138 val_138 2008-04-08 12 +217 val_217 2008-04-08 12 +84 val_84 2008-04-08 12 +348 val_348 2008-04-08 12 +466 val_466 2008-04-08 12 +58 val_58 2008-04-08 12 +8 val_8 2008-04-08 12 +411 val_411 2008-04-08 12 +230 val_230 2008-04-08 12 +208 val_208 2008-04-08 12 +348 val_348 2008-04-08 12 +24 val_24 2008-04-08 12 +463 val_463 2008-04-08 12 +431 val_431 2008-04-08 12 +179 val_179 2008-04-08 12 +172 val_172 2008-04-08 12 +42 val_42 2008-04-08 12 +129 val_129 2008-04-08 12 +158 val_158 2008-04-08 12 +119 val_119 2008-04-08 12 +496 val_496 2008-04-08 12 +0 val_0 2008-04-08 12 +322 val_322 2008-04-08 12 +197 val_197 2008-04-08 12 +468 val_468 2008-04-08 12 +393 val_393 2008-04-08 12 +454 val_454 2008-04-08 12 +100 val_100 2008-04-08 12 +298 val_298 2008-04-08 12 +199 val_199 2008-04-08 12 +191 val_191 2008-04-08 12 +418 val_418 2008-04-08 12 +96 val_96 2008-04-08 12 +26 val_26 2008-04-08 12 +165 val_165 2008-04-08 12 +327 val_327 2008-04-08 12 +230 val_230 2008-04-08 12 +205 val_205 2008-04-08 12 +120 val_120 2008-04-08 12 +131 val_131 2008-04-08 12 +51 val_51 2008-04-08 12 +404 val_404 2008-04-08 12 +43 val_43 2008-04-08 12 +436 val_436 2008-04-08 12 +156 val_156 2008-04-08 12 +469 val_469 2008-04-08 12 +468 val_468 2008-04-08 12 +308 val_308 2008-04-08 12 +95 val_95 2008-04-08 12 +196 val_196 2008-04-08 12 +288 val_288 2008-04-08 12 +481 val_481 2008-04-08 12 +457 val_457 2008-04-08 12 +98 val_98 2008-04-08 12 +282 val_282 2008-04-08 12 +197 val_197 2008-04-08 12 +187 val_187 2008-04-08 12 +318 val_318 2008-04-08 12 +318 val_318 2008-04-08 12 +409 val_409 2008-04-08 12 +470 val_470 2008-04-08 12 +137 val_137 2008-04-08 12 +369 val_369 2008-04-08 12 +316 val_316 2008-04-08 12 +169 val_169 2008-04-08 12 +413 val_413 2008-04-08 12 +85 val_85 2008-04-08 12 +77 val_77 2008-04-08 12 +0 val_0 2008-04-08 12 +490 val_490 2008-04-08 12 +87 val_87 2008-04-08 12 +364 val_364 2008-04-08 12 +179 val_179 2008-04-08 12 +118 val_118 2008-04-08 12 +134 val_134 2008-04-08 12 +395 val_395 2008-04-08 12 +282 val_282 2008-04-08 12 +138 val_138 2008-04-08 12 +238 val_238 2008-04-08 12 +419 val_419 2008-04-08 12 +15 val_15 2008-04-08 12 +118 val_118 2008-04-08 12 +72 val_72 2008-04-08 12 +90 val_90 2008-04-08 12 +307 val_307 2008-04-08 12 +19 val_19 2008-04-08 12 +435 val_435 2008-04-08 12 +10 val_10 2008-04-08 12 +277 val_277 2008-04-08 12 +273 val_273 2008-04-08 12 +306 val_306 2008-04-08 12 +224 val_224 2008-04-08 12 +309 val_309 2008-04-08 12 +389 val_389 2008-04-08 12 +327 val_327 2008-04-08 12 +242 val_242 2008-04-08 12 +369 val_369 2008-04-08 12 +392 val_392 2008-04-08 12 +272 val_272 2008-04-08 12 +331 val_331 2008-04-08 12 +401 val_401 2008-04-08 12 +242 val_242 2008-04-08 12 +452 val_452 2008-04-08 12 +177 val_177 2008-04-08 12 +226 val_226 2008-04-08 12 +5 val_5 2008-04-08 12 +497 val_497 2008-04-08 12 +402 val_402 2008-04-08 12 +396 val_396 2008-04-08 12 +317 val_317 2008-04-08 12 +395 val_395 2008-04-08 12 +58 
val_58 2008-04-08 12 +35 val_35 2008-04-08 12 +336 val_336 2008-04-08 12 +95 val_95 2008-04-08 12 +11 val_11 2008-04-08 12 +168 val_168 2008-04-08 12 +34 val_34 2008-04-08 12 +229 val_229 2008-04-08 12 +233 val_233 2008-04-08 12 +143 val_143 2008-04-08 12 +472 val_472 2008-04-08 12 +322 val_322 2008-04-08 12 +498 val_498 2008-04-08 12 +160 val_160 2008-04-08 12 +195 val_195 2008-04-08 12 +42 val_42 2008-04-08 12 +321 val_321 2008-04-08 12 +430 val_430 2008-04-08 12 +119 val_119 2008-04-08 12 +489 val_489 2008-04-08 12 +458 val_458 2008-04-08 12 +78 val_78 2008-04-08 12 +76 val_76 2008-04-08 12 +41 val_41 2008-04-08 12 +223 val_223 2008-04-08 12 +492 val_492 2008-04-08 12 +149 val_149 2008-04-08 12 +449 val_449 2008-04-08 12 +218 val_218 2008-04-08 12 +228 val_228 2008-04-08 12 +138 val_138 2008-04-08 12 +453 val_453 2008-04-08 12 +30 val_30 2008-04-08 12 +209 val_209 2008-04-08 12 +64 val_64 2008-04-08 12 +468 val_468 2008-04-08 12 +76 val_76 2008-04-08 12 +74 val_74 2008-04-08 12 +342 val_342 2008-04-08 12 +69 val_69 2008-04-08 12 +230 val_230 2008-04-08 12 +33 val_33 2008-04-08 12 +368 val_368 2008-04-08 12 +103 val_103 2008-04-08 12 +296 val_296 2008-04-08 12 +113 val_113 2008-04-08 12 +216 val_216 2008-04-08 12 +367 val_367 2008-04-08 12 +344 val_344 2008-04-08 12 +167 val_167 2008-04-08 12 +274 val_274 2008-04-08 12 +219 val_219 2008-04-08 12 +239 val_239 2008-04-08 12 +485 val_485 2008-04-08 12 +116 val_116 2008-04-08 12 +223 val_223 2008-04-08 12 +256 val_256 2008-04-08 12 +263 val_263 2008-04-08 12 +70 val_70 2008-04-08 12 +487 val_487 2008-04-08 12 +480 val_480 2008-04-08 12 +401 val_401 2008-04-08 12 +288 val_288 2008-04-08 12 +191 val_191 2008-04-08 12 +5 val_5 2008-04-08 12 +244 val_244 2008-04-08 12 +438 val_438 2008-04-08 12 +128 val_128 2008-04-08 12 +467 val_467 2008-04-08 12 +432 val_432 2008-04-08 12 +202 val_202 2008-04-08 12 +316 val_316 2008-04-08 12 +229 val_229 2008-04-08 12 +469 val_469 2008-04-08 12 +463 val_463 2008-04-08 12 +280 val_280 2008-04-08 12 +2 val_2 2008-04-08 12 +35 val_35 2008-04-08 12 +283 val_283 2008-04-08 12 +331 val_331 2008-04-08 12 +235 val_235 2008-04-08 12 +80 val_80 2008-04-08 12 +44 val_44 2008-04-08 12 +193 val_193 2008-04-08 12 +321 val_321 2008-04-08 12 +335 val_335 2008-04-08 12 +104 val_104 2008-04-08 12 +466 val_466 2008-04-08 12 +366 val_366 2008-04-08 12 +175 val_175 2008-04-08 12 +403 val_403 2008-04-08 12 +483 val_483 2008-04-08 12 +53 val_53 2008-04-08 12 +105 val_105 2008-04-08 12 +257 val_257 2008-04-08 12 +406 val_406 2008-04-08 12 +409 val_409 2008-04-08 12 +190 val_190 2008-04-08 12 +406 val_406 2008-04-08 12 +401 val_401 2008-04-08 12 +114 val_114 2008-04-08 12 +258 val_258 2008-04-08 12 +90 val_90 2008-04-08 12 +203 val_203 2008-04-08 12 +262 val_262 2008-04-08 12 +348 val_348 2008-04-08 12 +424 val_424 2008-04-08 12 +12 val_12 2008-04-08 12 +396 val_396 2008-04-08 12 +201 val_201 2008-04-08 12 +217 val_217 2008-04-08 12 +164 val_164 2008-04-08 12 +431 val_431 2008-04-08 12 +454 val_454 2008-04-08 12 +478 val_478 2008-04-08 12 +298 val_298 2008-04-08 12 +125 val_125 2008-04-08 12 +431 val_431 2008-04-08 12 +164 val_164 2008-04-08 12 +424 val_424 2008-04-08 12 +187 val_187 2008-04-08 12 +382 val_382 2008-04-08 12 +5 val_5 2008-04-08 12 +70 val_70 2008-04-08 12 +397 val_397 2008-04-08 12 +480 val_480 2008-04-08 12 +291 val_291 2008-04-08 12 +24 val_24 2008-04-08 12 +351 val_351 2008-04-08 12 +255 val_255 2008-04-08 12 +104 val_104 2008-04-08 12 +70 val_70 2008-04-08 12 +163 val_163 2008-04-08 12 +438 val_438 2008-04-08 12 
+119 val_119 2008-04-08 12 +414 val_414 2008-04-08 12 +200 val_200 2008-04-08 12 +491 val_491 2008-04-08 12 +237 val_237 2008-04-08 12 +439 val_439 2008-04-08 12 +360 val_360 2008-04-08 12 +248 val_248 2008-04-08 12 +479 val_479 2008-04-08 12 +305 val_305 2008-04-08 12 +417 val_417 2008-04-08 12 +199 val_199 2008-04-08 12 +444 val_444 2008-04-08 12 +120 val_120 2008-04-08 12 +429 val_429 2008-04-08 12 +169 val_169 2008-04-08 12 +443 val_443 2008-04-08 12 +323 val_323 2008-04-08 12 +325 val_325 2008-04-08 12 +277 val_277 2008-04-08 12 +230 val_230 2008-04-08 12 +478 val_478 2008-04-08 12 +178 val_178 2008-04-08 12 +468 val_468 2008-04-08 12 +310 val_310 2008-04-08 12 +317 val_317 2008-04-08 12 +333 val_333 2008-04-08 12 +493 val_493 2008-04-08 12 +460 val_460 2008-04-08 12 +207 val_207 2008-04-08 12 +249 val_249 2008-04-08 12 +265 val_265 2008-04-08 12 +480 val_480 2008-04-08 12 +83 val_83 2008-04-08 12 +136 val_136 2008-04-08 12 +353 val_353 2008-04-08 12 +172 val_172 2008-04-08 12 +214 val_214 2008-04-08 12 +462 val_462 2008-04-08 12 +233 val_233 2008-04-08 12 +406 val_406 2008-04-08 12 +133 val_133 2008-04-08 12 +175 val_175 2008-04-08 12 +189 val_189 2008-04-08 12 +454 val_454 2008-04-08 12 +375 val_375 2008-04-08 12 +401 val_401 2008-04-08 12 +421 val_421 2008-04-08 12 +407 val_407 2008-04-08 12 +384 val_384 2008-04-08 12 +256 val_256 2008-04-08 12 +26 val_26 2008-04-08 12 +134 val_134 2008-04-08 12 +67 val_67 2008-04-08 12 +384 val_384 2008-04-08 12 +379 val_379 2008-04-08 12 +18 val_18 2008-04-08 12 +462 val_462 2008-04-08 12 +492 val_492 2008-04-08 12 +100 val_100 2008-04-08 12 +298 val_298 2008-04-08 12 +9 val_9 2008-04-08 12 +341 val_341 2008-04-08 12 +498 val_498 2008-04-08 12 +146 val_146 2008-04-08 12 +458 val_458 2008-04-08 12 +362 val_362 2008-04-08 12 +186 val_186 2008-04-08 12 +285 val_285 2008-04-08 12 +348 val_348 2008-04-08 12 +167 val_167 2008-04-08 12 +18 val_18 2008-04-08 12 +273 val_273 2008-04-08 12 +183 val_183 2008-04-08 12 +281 val_281 2008-04-08 12 +344 val_344 2008-04-08 12 +97 val_97 2008-04-08 12 +469 val_469 2008-04-08 12 +315 val_315 2008-04-08 12 +84 val_84 2008-04-08 12 +28 val_28 2008-04-08 12 +37 val_37 2008-04-08 12 +448 val_448 2008-04-08 12 +152 val_152 2008-04-08 12 +348 val_348 2008-04-08 12 +307 val_307 2008-04-08 12 +194 val_194 2008-04-08 12 +414 val_414 2008-04-08 12 +477 val_477 2008-04-08 12 +222 val_222 2008-04-08 12 +126 val_126 2008-04-08 12 +90 val_90 2008-04-08 12 +169 val_169 2008-04-08 12 +403 val_403 2008-04-08 12 +400 val_400 2008-04-08 12 +200 val_200 2008-04-08 12 +97 val_97 2008-04-08 12 +238 val_238 2008-04-09 11 +86 val_86 2008-04-09 11 +311 val_311 2008-04-09 11 +27 val_27 2008-04-09 11 +165 val_165 2008-04-09 11 +409 val_409 2008-04-09 11 +255 val_255 2008-04-09 11 +278 val_278 2008-04-09 11 +98 val_98 2008-04-09 11 +484 val_484 2008-04-09 11 +265 val_265 2008-04-09 11 +193 val_193 2008-04-09 11 +401 val_401 2008-04-09 11 +150 val_150 2008-04-09 11 +273 val_273 2008-04-09 11 +224 val_224 2008-04-09 11 +369 val_369 2008-04-09 11 +66 val_66 2008-04-09 11 +128 val_128 2008-04-09 11 +213 val_213 2008-04-09 11 +146 val_146 2008-04-09 11 +406 val_406 2008-04-09 11 +429 val_429 2008-04-09 11 +374 val_374 2008-04-09 11 +152 val_152 2008-04-09 11 +469 val_469 2008-04-09 11 +145 val_145 2008-04-09 11 +495 val_495 2008-04-09 11 +37 val_37 2008-04-09 11 +327 val_327 2008-04-09 11 +281 val_281 2008-04-09 11 +277 val_277 2008-04-09 11 +209 val_209 2008-04-09 11 +15 val_15 2008-04-09 11 +82 val_82 2008-04-09 11 +403 val_403 2008-04-09 11 
+166 val_166 2008-04-09 11 +417 val_417 2008-04-09 11 +430 val_430 2008-04-09 11 +252 val_252 2008-04-09 11 +292 val_292 2008-04-09 11 +219 val_219 2008-04-09 11 +287 val_287 2008-04-09 11 +153 val_153 2008-04-09 11 +193 val_193 2008-04-09 11 +338 val_338 2008-04-09 11 +446 val_446 2008-04-09 11 +459 val_459 2008-04-09 11 +394 val_394 2008-04-09 11 +237 val_237 2008-04-09 11 +482 val_482 2008-04-09 11 +174 val_174 2008-04-09 11 +413 val_413 2008-04-09 11 +494 val_494 2008-04-09 11 +207 val_207 2008-04-09 11 +199 val_199 2008-04-09 11 +466 val_466 2008-04-09 11 +208 val_208 2008-04-09 11 +174 val_174 2008-04-09 11 +399 val_399 2008-04-09 11 +396 val_396 2008-04-09 11 +247 val_247 2008-04-09 11 +417 val_417 2008-04-09 11 +489 val_489 2008-04-09 11 +162 val_162 2008-04-09 11 +377 val_377 2008-04-09 11 +397 val_397 2008-04-09 11 +309 val_309 2008-04-09 11 +365 val_365 2008-04-09 11 +266 val_266 2008-04-09 11 +439 val_439 2008-04-09 11 +342 val_342 2008-04-09 11 +367 val_367 2008-04-09 11 +325 val_325 2008-04-09 11 +167 val_167 2008-04-09 11 +195 val_195 2008-04-09 11 +475 val_475 2008-04-09 11 +17 val_17 2008-04-09 11 +113 val_113 2008-04-09 11 +155 val_155 2008-04-09 11 +203 val_203 2008-04-09 11 +339 val_339 2008-04-09 11 +0 val_0 2008-04-09 11 +455 val_455 2008-04-09 11 +128 val_128 2008-04-09 11 +311 val_311 2008-04-09 11 +316 val_316 2008-04-09 11 +57 val_57 2008-04-09 11 +302 val_302 2008-04-09 11 +205 val_205 2008-04-09 11 +149 val_149 2008-04-09 11 +438 val_438 2008-04-09 11 +345 val_345 2008-04-09 11 +129 val_129 2008-04-09 11 +170 val_170 2008-04-09 11 +20 val_20 2008-04-09 11 +489 val_489 2008-04-09 11 +157 val_157 2008-04-09 11 +378 val_378 2008-04-09 11 +221 val_221 2008-04-09 11 +92 val_92 2008-04-09 11 +111 val_111 2008-04-09 11 +47 val_47 2008-04-09 11 +72 val_72 2008-04-09 11 +4 val_4 2008-04-09 11 +280 val_280 2008-04-09 11 +35 val_35 2008-04-09 11 +427 val_427 2008-04-09 11 +277 val_277 2008-04-09 11 +208 val_208 2008-04-09 11 +356 val_356 2008-04-09 11 +399 val_399 2008-04-09 11 +169 val_169 2008-04-09 11 +382 val_382 2008-04-09 11 +498 val_498 2008-04-09 11 +125 val_125 2008-04-09 11 +386 val_386 2008-04-09 11 +437 val_437 2008-04-09 11 +469 val_469 2008-04-09 11 +192 val_192 2008-04-09 11 +286 val_286 2008-04-09 11 +187 val_187 2008-04-09 11 +176 val_176 2008-04-09 11 +54 val_54 2008-04-09 11 +459 val_459 2008-04-09 11 +51 val_51 2008-04-09 11 +138 val_138 2008-04-09 11 +103 val_103 2008-04-09 11 +239 val_239 2008-04-09 11 +213 val_213 2008-04-09 11 +216 val_216 2008-04-09 11 +430 val_430 2008-04-09 11 +278 val_278 2008-04-09 11 +176 val_176 2008-04-09 11 +289 val_289 2008-04-09 11 +221 val_221 2008-04-09 11 +65 val_65 2008-04-09 11 +318 val_318 2008-04-09 11 +332 val_332 2008-04-09 11 +311 val_311 2008-04-09 11 +275 val_275 2008-04-09 11 +137 val_137 2008-04-09 11 +241 val_241 2008-04-09 11 +83 val_83 2008-04-09 11 +333 val_333 2008-04-09 11 +180 val_180 2008-04-09 11 +284 val_284 2008-04-09 11 +12 val_12 2008-04-09 11 +230 val_230 2008-04-09 11 +181 val_181 2008-04-09 11 +67 val_67 2008-04-09 11 +260 val_260 2008-04-09 11 +404 val_404 2008-04-09 11 +384 val_384 2008-04-09 11 +489 val_489 2008-04-09 11 +353 val_353 2008-04-09 11 +373 val_373 2008-04-09 11 +272 val_272 2008-04-09 11 +138 val_138 2008-04-09 11 +217 val_217 2008-04-09 11 +84 val_84 2008-04-09 11 +348 val_348 2008-04-09 11 +466 val_466 2008-04-09 11 +58 val_58 2008-04-09 11 +8 val_8 2008-04-09 11 +411 val_411 2008-04-09 11 +230 val_230 2008-04-09 11 +208 val_208 2008-04-09 11 +348 val_348 2008-04-09 11 +24 
val_24 2008-04-09 11 +463 val_463 2008-04-09 11 +431 val_431 2008-04-09 11 +179 val_179 2008-04-09 11 +172 val_172 2008-04-09 11 +42 val_42 2008-04-09 11 +129 val_129 2008-04-09 11 +158 val_158 2008-04-09 11 +119 val_119 2008-04-09 11 +496 val_496 2008-04-09 11 +0 val_0 2008-04-09 11 +322 val_322 2008-04-09 11 +197 val_197 2008-04-09 11 +468 val_468 2008-04-09 11 +393 val_393 2008-04-09 11 +454 val_454 2008-04-09 11 +100 val_100 2008-04-09 11 +298 val_298 2008-04-09 11 +199 val_199 2008-04-09 11 +191 val_191 2008-04-09 11 +418 val_418 2008-04-09 11 +96 val_96 2008-04-09 11 +26 val_26 2008-04-09 11 +165 val_165 2008-04-09 11 +327 val_327 2008-04-09 11 +230 val_230 2008-04-09 11 +205 val_205 2008-04-09 11 +120 val_120 2008-04-09 11 +131 val_131 2008-04-09 11 +51 val_51 2008-04-09 11 +404 val_404 2008-04-09 11 +43 val_43 2008-04-09 11 +436 val_436 2008-04-09 11 +156 val_156 2008-04-09 11 +469 val_469 2008-04-09 11 +468 val_468 2008-04-09 11 +308 val_308 2008-04-09 11 +95 val_95 2008-04-09 11 +196 val_196 2008-04-09 11 +288 val_288 2008-04-09 11 +481 val_481 2008-04-09 11 +457 val_457 2008-04-09 11 +98 val_98 2008-04-09 11 +282 val_282 2008-04-09 11 +197 val_197 2008-04-09 11 +187 val_187 2008-04-09 11 +318 val_318 2008-04-09 11 +318 val_318 2008-04-09 11 +409 val_409 2008-04-09 11 +470 val_470 2008-04-09 11 +137 val_137 2008-04-09 11 +369 val_369 2008-04-09 11 +316 val_316 2008-04-09 11 +169 val_169 2008-04-09 11 +413 val_413 2008-04-09 11 +85 val_85 2008-04-09 11 +77 val_77 2008-04-09 11 +0 val_0 2008-04-09 11 +490 val_490 2008-04-09 11 +87 val_87 2008-04-09 11 +364 val_364 2008-04-09 11 +179 val_179 2008-04-09 11 +118 val_118 2008-04-09 11 +134 val_134 2008-04-09 11 +395 val_395 2008-04-09 11 +282 val_282 2008-04-09 11 +138 val_138 2008-04-09 11 +238 val_238 2008-04-09 11 +419 val_419 2008-04-09 11 +15 val_15 2008-04-09 11 +118 val_118 2008-04-09 11 +72 val_72 2008-04-09 11 +90 val_90 2008-04-09 11 +307 val_307 2008-04-09 11 +19 val_19 2008-04-09 11 +435 val_435 2008-04-09 11 +10 val_10 2008-04-09 11 +277 val_277 2008-04-09 11 +273 val_273 2008-04-09 11 +306 val_306 2008-04-09 11 +224 val_224 2008-04-09 11 +309 val_309 2008-04-09 11 +389 val_389 2008-04-09 11 +327 val_327 2008-04-09 11 +242 val_242 2008-04-09 11 +369 val_369 2008-04-09 11 +392 val_392 2008-04-09 11 +272 val_272 2008-04-09 11 +331 val_331 2008-04-09 11 +401 val_401 2008-04-09 11 +242 val_242 2008-04-09 11 +452 val_452 2008-04-09 11 +177 val_177 2008-04-09 11 +226 val_226 2008-04-09 11 +5 val_5 2008-04-09 11 +497 val_497 2008-04-09 11 +402 val_402 2008-04-09 11 +396 val_396 2008-04-09 11 +317 val_317 2008-04-09 11 +395 val_395 2008-04-09 11 +58 val_58 2008-04-09 11 +35 val_35 2008-04-09 11 +336 val_336 2008-04-09 11 +95 val_95 2008-04-09 11 +11 val_11 2008-04-09 11 +168 val_168 2008-04-09 11 +34 val_34 2008-04-09 11 +229 val_229 2008-04-09 11 +233 val_233 2008-04-09 11 +143 val_143 2008-04-09 11 +472 val_472 2008-04-09 11 +322 val_322 2008-04-09 11 +498 val_498 2008-04-09 11 +160 val_160 2008-04-09 11 +195 val_195 2008-04-09 11 +42 val_42 2008-04-09 11 +321 val_321 2008-04-09 11 +430 val_430 2008-04-09 11 +119 val_119 2008-04-09 11 +489 val_489 2008-04-09 11 +458 val_458 2008-04-09 11 +78 val_78 2008-04-09 11 +76 val_76 2008-04-09 11 +41 val_41 2008-04-09 11 +223 val_223 2008-04-09 11 +492 val_492 2008-04-09 11 +149 val_149 2008-04-09 11 +449 val_449 2008-04-09 11 +218 val_218 2008-04-09 11 +228 val_228 2008-04-09 11 +138 val_138 2008-04-09 11 +453 val_453 2008-04-09 11 +30 val_30 2008-04-09 11 +209 val_209 2008-04-09 11 +64 
val_64 2008-04-09 11 +468 val_468 2008-04-09 11 +76 val_76 2008-04-09 11 +74 val_74 2008-04-09 11 +342 val_342 2008-04-09 11 +69 val_69 2008-04-09 11 +230 val_230 2008-04-09 11 +33 val_33 2008-04-09 11 +368 val_368 2008-04-09 11 +103 val_103 2008-04-09 11 +296 val_296 2008-04-09 11 +113 val_113 2008-04-09 11 +216 val_216 2008-04-09 11 +367 val_367 2008-04-09 11 +344 val_344 2008-04-09 11 +167 val_167 2008-04-09 11 +274 val_274 2008-04-09 11 +219 val_219 2008-04-09 11 +239 val_239 2008-04-09 11 +485 val_485 2008-04-09 11 +116 val_116 2008-04-09 11 +223 val_223 2008-04-09 11 +256 val_256 2008-04-09 11 +263 val_263 2008-04-09 11 +70 val_70 2008-04-09 11 +487 val_487 2008-04-09 11 +480 val_480 2008-04-09 11 +401 val_401 2008-04-09 11 +288 val_288 2008-04-09 11 +191 val_191 2008-04-09 11 +5 val_5 2008-04-09 11 +244 val_244 2008-04-09 11 +438 val_438 2008-04-09 11 +128 val_128 2008-04-09 11 +467 val_467 2008-04-09 11 +432 val_432 2008-04-09 11 +202 val_202 2008-04-09 11 +316 val_316 2008-04-09 11 +229 val_229 2008-04-09 11 +469 val_469 2008-04-09 11 +463 val_463 2008-04-09 11 +280 val_280 2008-04-09 11 +2 val_2 2008-04-09 11 +35 val_35 2008-04-09 11 +283 val_283 2008-04-09 11 +331 val_331 2008-04-09 11 +235 val_235 2008-04-09 11 +80 val_80 2008-04-09 11 +44 val_44 2008-04-09 11 +193 val_193 2008-04-09 11 +321 val_321 2008-04-09 11 +335 val_335 2008-04-09 11 +104 val_104 2008-04-09 11 +466 val_466 2008-04-09 11 +366 val_366 2008-04-09 11 +175 val_175 2008-04-09 11 +403 val_403 2008-04-09 11 +483 val_483 2008-04-09 11 +53 val_53 2008-04-09 11 +105 val_105 2008-04-09 11 +257 val_257 2008-04-09 11 +406 val_406 2008-04-09 11 +409 val_409 2008-04-09 11 +190 val_190 2008-04-09 11 +406 val_406 2008-04-09 11 +401 val_401 2008-04-09 11 +114 val_114 2008-04-09 11 +258 val_258 2008-04-09 11 +90 val_90 2008-04-09 11 +203 val_203 2008-04-09 11 +262 val_262 2008-04-09 11 +348 val_348 2008-04-09 11 +424 val_424 2008-04-09 11 +12 val_12 2008-04-09 11 +396 val_396 2008-04-09 11 +201 val_201 2008-04-09 11 +217 val_217 2008-04-09 11 +164 val_164 2008-04-09 11 +431 val_431 2008-04-09 11 +454 val_454 2008-04-09 11 +478 val_478 2008-04-09 11 +298 val_298 2008-04-09 11 +125 val_125 2008-04-09 11 +431 val_431 2008-04-09 11 +164 val_164 2008-04-09 11 +424 val_424 2008-04-09 11 +187 val_187 2008-04-09 11 +382 val_382 2008-04-09 11 +5 val_5 2008-04-09 11 +70 val_70 2008-04-09 11 +397 val_397 2008-04-09 11 +480 val_480 2008-04-09 11 +291 val_291 2008-04-09 11 +24 val_24 2008-04-09 11 +351 val_351 2008-04-09 11 +255 val_255 2008-04-09 11 +104 val_104 2008-04-09 11 +70 val_70 2008-04-09 11 +163 val_163 2008-04-09 11 +438 val_438 2008-04-09 11 +119 val_119 2008-04-09 11 +414 val_414 2008-04-09 11 +200 val_200 2008-04-09 11 +491 val_491 2008-04-09 11 +237 val_237 2008-04-09 11 +439 val_439 2008-04-09 11 +360 val_360 2008-04-09 11 +248 val_248 2008-04-09 11 +479 val_479 2008-04-09 11 +305 val_305 2008-04-09 11 +417 val_417 2008-04-09 11 +199 val_199 2008-04-09 11 +444 val_444 2008-04-09 11 +120 val_120 2008-04-09 11 +429 val_429 2008-04-09 11 +169 val_169 2008-04-09 11 +443 val_443 2008-04-09 11 +323 val_323 2008-04-09 11 +325 val_325 2008-04-09 11 +277 val_277 2008-04-09 11 +230 val_230 2008-04-09 11 +478 val_478 2008-04-09 11 +178 val_178 2008-04-09 11 +468 val_468 2008-04-09 11 +310 val_310 2008-04-09 11 +317 val_317 2008-04-09 11 +333 val_333 2008-04-09 11 +493 val_493 2008-04-09 11 +460 val_460 2008-04-09 11 +207 val_207 2008-04-09 11 +249 val_249 2008-04-09 11 +265 val_265 2008-04-09 11 +480 val_480 2008-04-09 11 +83 
val_83 2008-04-09 11 +136 val_136 2008-04-09 11 +353 val_353 2008-04-09 11 +172 val_172 2008-04-09 11 +214 val_214 2008-04-09 11 +462 val_462 2008-04-09 11 +233 val_233 2008-04-09 11 +406 val_406 2008-04-09 11 +133 val_133 2008-04-09 11 +175 val_175 2008-04-09 11 +189 val_189 2008-04-09 11 +454 val_454 2008-04-09 11 +375 val_375 2008-04-09 11 +401 val_401 2008-04-09 11 +421 val_421 2008-04-09 11 +407 val_407 2008-04-09 11 +384 val_384 2008-04-09 11 +256 val_256 2008-04-09 11 +26 val_26 2008-04-09 11 +134 val_134 2008-04-09 11 +67 val_67 2008-04-09 11 +384 val_384 2008-04-09 11 +379 val_379 2008-04-09 11 +18 val_18 2008-04-09 11 +462 val_462 2008-04-09 11 +492 val_492 2008-04-09 11 +100 val_100 2008-04-09 11 +298 val_298 2008-04-09 11 +9 val_9 2008-04-09 11 +341 val_341 2008-04-09 11 +498 val_498 2008-04-09 11 +146 val_146 2008-04-09 11 +458 val_458 2008-04-09 11 +362 val_362 2008-04-09 11 +186 val_186 2008-04-09 11 +285 val_285 2008-04-09 11 +348 val_348 2008-04-09 11 +167 val_167 2008-04-09 11 +18 val_18 2008-04-09 11 +273 val_273 2008-04-09 11 +183 val_183 2008-04-09 11 +281 val_281 2008-04-09 11 +344 val_344 2008-04-09 11 +97 val_97 2008-04-09 11 +469 val_469 2008-04-09 11 +315 val_315 2008-04-09 11 +84 val_84 2008-04-09 11 +28 val_28 2008-04-09 11 +37 val_37 2008-04-09 11 +448 val_448 2008-04-09 11 +152 val_152 2008-04-09 11 +348 val_348 2008-04-09 11 +307 val_307 2008-04-09 11 +194 val_194 2008-04-09 11 +414 val_414 2008-04-09 11 +477 val_477 2008-04-09 11 +222 val_222 2008-04-09 11 +126 val_126 2008-04-09 11 +90 val_90 2008-04-09 11 +169 val_169 2008-04-09 11 +403 val_403 2008-04-09 11 +400 val_400 2008-04-09 11 +200 val_200 2008-04-09 11 +97 val_97 2008-04-09 11 +238 val_238 2008-04-09 12 +86 val_86 2008-04-09 12 +311 val_311 2008-04-09 12 +27 val_27 2008-04-09 12 +165 val_165 2008-04-09 12 +409 val_409 2008-04-09 12 +255 val_255 2008-04-09 12 +278 val_278 2008-04-09 12 +98 val_98 2008-04-09 12 +484 val_484 2008-04-09 12 +265 val_265 2008-04-09 12 +193 val_193 2008-04-09 12 +401 val_401 2008-04-09 12 +150 val_150 2008-04-09 12 +273 val_273 2008-04-09 12 +224 val_224 2008-04-09 12 +369 val_369 2008-04-09 12 +66 val_66 2008-04-09 12 +128 val_128 2008-04-09 12 +213 val_213 2008-04-09 12 +146 val_146 2008-04-09 12 +406 val_406 2008-04-09 12 +429 val_429 2008-04-09 12 +374 val_374 2008-04-09 12 +152 val_152 2008-04-09 12 +469 val_469 2008-04-09 12 +145 val_145 2008-04-09 12 +495 val_495 2008-04-09 12 +37 val_37 2008-04-09 12 +327 val_327 2008-04-09 12 +281 val_281 2008-04-09 12 +277 val_277 2008-04-09 12 +209 val_209 2008-04-09 12 +15 val_15 2008-04-09 12 +82 val_82 2008-04-09 12 +403 val_403 2008-04-09 12 +166 val_166 2008-04-09 12 +417 val_417 2008-04-09 12 +430 val_430 2008-04-09 12 +252 val_252 2008-04-09 12 +292 val_292 2008-04-09 12 +219 val_219 2008-04-09 12 +287 val_287 2008-04-09 12 +153 val_153 2008-04-09 12 +193 val_193 2008-04-09 12 +338 val_338 2008-04-09 12 +446 val_446 2008-04-09 12 +459 val_459 2008-04-09 12 +394 val_394 2008-04-09 12 +237 val_237 2008-04-09 12 +482 val_482 2008-04-09 12 +174 val_174 2008-04-09 12 +413 val_413 2008-04-09 12 +494 val_494 2008-04-09 12 +207 val_207 2008-04-09 12 +199 val_199 2008-04-09 12 +466 val_466 2008-04-09 12 +208 val_208 2008-04-09 12 +174 val_174 2008-04-09 12 +399 val_399 2008-04-09 12 +396 val_396 2008-04-09 12 +247 val_247 2008-04-09 12 +417 val_417 2008-04-09 12 +489 val_489 2008-04-09 12 +162 val_162 2008-04-09 12 +377 val_377 2008-04-09 12 +397 val_397 2008-04-09 12 +309 val_309 2008-04-09 12 +365 val_365 2008-04-09 12 +266 
val_266 2008-04-09 12 +439 val_439 2008-04-09 12 +342 val_342 2008-04-09 12 +367 val_367 2008-04-09 12 +325 val_325 2008-04-09 12 +167 val_167 2008-04-09 12 +195 val_195 2008-04-09 12 +475 val_475 2008-04-09 12 +17 val_17 2008-04-09 12 +113 val_113 2008-04-09 12 +155 val_155 2008-04-09 12 +203 val_203 2008-04-09 12 +339 val_339 2008-04-09 12 +0 val_0 2008-04-09 12 +455 val_455 2008-04-09 12 +128 val_128 2008-04-09 12 +311 val_311 2008-04-09 12 +316 val_316 2008-04-09 12 +57 val_57 2008-04-09 12 +302 val_302 2008-04-09 12 +205 val_205 2008-04-09 12 +149 val_149 2008-04-09 12 +438 val_438 2008-04-09 12 +345 val_345 2008-04-09 12 +129 val_129 2008-04-09 12 +170 val_170 2008-04-09 12 +20 val_20 2008-04-09 12 +489 val_489 2008-04-09 12 +157 val_157 2008-04-09 12 +378 val_378 2008-04-09 12 +221 val_221 2008-04-09 12 +92 val_92 2008-04-09 12 +111 val_111 2008-04-09 12 +47 val_47 2008-04-09 12 +72 val_72 2008-04-09 12 +4 val_4 2008-04-09 12 +280 val_280 2008-04-09 12 +35 val_35 2008-04-09 12 +427 val_427 2008-04-09 12 +277 val_277 2008-04-09 12 +208 val_208 2008-04-09 12 +356 val_356 2008-04-09 12 +399 val_399 2008-04-09 12 +169 val_169 2008-04-09 12 +382 val_382 2008-04-09 12 +498 val_498 2008-04-09 12 +125 val_125 2008-04-09 12 +386 val_386 2008-04-09 12 +437 val_437 2008-04-09 12 +469 val_469 2008-04-09 12 +192 val_192 2008-04-09 12 +286 val_286 2008-04-09 12 +187 val_187 2008-04-09 12 +176 val_176 2008-04-09 12 +54 val_54 2008-04-09 12 +459 val_459 2008-04-09 12 +51 val_51 2008-04-09 12 +138 val_138 2008-04-09 12 +103 val_103 2008-04-09 12 +239 val_239 2008-04-09 12 +213 val_213 2008-04-09 12 +216 val_216 2008-04-09 12 +430 val_430 2008-04-09 12 +278 val_278 2008-04-09 12 +176 val_176 2008-04-09 12 +289 val_289 2008-04-09 12 +221 val_221 2008-04-09 12 +65 val_65 2008-04-09 12 +318 val_318 2008-04-09 12 +332 val_332 2008-04-09 12 +311 val_311 2008-04-09 12 +275 val_275 2008-04-09 12 +137 val_137 2008-04-09 12 +241 val_241 2008-04-09 12 +83 val_83 2008-04-09 12 +333 val_333 2008-04-09 12 +180 val_180 2008-04-09 12 +284 val_284 2008-04-09 12 +12 val_12 2008-04-09 12 +230 val_230 2008-04-09 12 +181 val_181 2008-04-09 12 +67 val_67 2008-04-09 12 +260 val_260 2008-04-09 12 +404 val_404 2008-04-09 12 +384 val_384 2008-04-09 12 +489 val_489 2008-04-09 12 +353 val_353 2008-04-09 12 +373 val_373 2008-04-09 12 +272 val_272 2008-04-09 12 +138 val_138 2008-04-09 12 +217 val_217 2008-04-09 12 +84 val_84 2008-04-09 12 +348 val_348 2008-04-09 12 +466 val_466 2008-04-09 12 +58 val_58 2008-04-09 12 +8 val_8 2008-04-09 12 +411 val_411 2008-04-09 12 +230 val_230 2008-04-09 12 +208 val_208 2008-04-09 12 +348 val_348 2008-04-09 12 +24 val_24 2008-04-09 12 +463 val_463 2008-04-09 12 +431 val_431 2008-04-09 12 +179 val_179 2008-04-09 12 +172 val_172 2008-04-09 12 +42 val_42 2008-04-09 12 +129 val_129 2008-04-09 12 +158 val_158 2008-04-09 12 +119 val_119 2008-04-09 12 +496 val_496 2008-04-09 12 +0 val_0 2008-04-09 12 +322 val_322 2008-04-09 12 +197 val_197 2008-04-09 12 +468 val_468 2008-04-09 12 +393 val_393 2008-04-09 12 +454 val_454 2008-04-09 12 +100 val_100 2008-04-09 12 +298 val_298 2008-04-09 12 +199 val_199 2008-04-09 12 +191 val_191 2008-04-09 12 +418 val_418 2008-04-09 12 +96 val_96 2008-04-09 12 +26 val_26 2008-04-09 12 +165 val_165 2008-04-09 12 +327 val_327 2008-04-09 12 +230 val_230 2008-04-09 12 +205 val_205 2008-04-09 12 +120 val_120 2008-04-09 12 +131 val_131 2008-04-09 12 +51 val_51 2008-04-09 12 +404 val_404 2008-04-09 12 +43 val_43 2008-04-09 12 +436 val_436 2008-04-09 12 +156 val_156 2008-04-09 12 
+469 val_469 2008-04-09 12 +468 val_468 2008-04-09 12 +308 val_308 2008-04-09 12 +95 val_95 2008-04-09 12 +196 val_196 2008-04-09 12 +288 val_288 2008-04-09 12 +481 val_481 2008-04-09 12 +457 val_457 2008-04-09 12 +98 val_98 2008-04-09 12 +282 val_282 2008-04-09 12 +197 val_197 2008-04-09 12 +187 val_187 2008-04-09 12 +318 val_318 2008-04-09 12 +318 val_318 2008-04-09 12 +409 val_409 2008-04-09 12 +470 val_470 2008-04-09 12 +137 val_137 2008-04-09 12 +369 val_369 2008-04-09 12 +316 val_316 2008-04-09 12 +169 val_169 2008-04-09 12 +413 val_413 2008-04-09 12 +85 val_85 2008-04-09 12 +77 val_77 2008-04-09 12 +0 val_0 2008-04-09 12 +490 val_490 2008-04-09 12 +87 val_87 2008-04-09 12 +364 val_364 2008-04-09 12 +179 val_179 2008-04-09 12 +118 val_118 2008-04-09 12 +134 val_134 2008-04-09 12 +395 val_395 2008-04-09 12 +282 val_282 2008-04-09 12 +138 val_138 2008-04-09 12 +238 val_238 2008-04-09 12 +419 val_419 2008-04-09 12 +15 val_15 2008-04-09 12 +118 val_118 2008-04-09 12 +72 val_72 2008-04-09 12 +90 val_90 2008-04-09 12 +307 val_307 2008-04-09 12 +19 val_19 2008-04-09 12 +435 val_435 2008-04-09 12 +10 val_10 2008-04-09 12 +277 val_277 2008-04-09 12 +273 val_273 2008-04-09 12 +306 val_306 2008-04-09 12 +224 val_224 2008-04-09 12 +309 val_309 2008-04-09 12 +389 val_389 2008-04-09 12 +327 val_327 2008-04-09 12 +242 val_242 2008-04-09 12 +369 val_369 2008-04-09 12 +392 val_392 2008-04-09 12 +272 val_272 2008-04-09 12 +331 val_331 2008-04-09 12 +401 val_401 2008-04-09 12 +242 val_242 2008-04-09 12 +452 val_452 2008-04-09 12 +177 val_177 2008-04-09 12 +226 val_226 2008-04-09 12 +5 val_5 2008-04-09 12 +497 val_497 2008-04-09 12 +402 val_402 2008-04-09 12 +396 val_396 2008-04-09 12 +317 val_317 2008-04-09 12 +395 val_395 2008-04-09 12 +58 val_58 2008-04-09 12 +35 val_35 2008-04-09 12 +336 val_336 2008-04-09 12 +95 val_95 2008-04-09 12 +11 val_11 2008-04-09 12 +168 val_168 2008-04-09 12 +34 val_34 2008-04-09 12 +229 val_229 2008-04-09 12 +233 val_233 2008-04-09 12 +143 val_143 2008-04-09 12 +472 val_472 2008-04-09 12 +322 val_322 2008-04-09 12 +498 val_498 2008-04-09 12 +160 val_160 2008-04-09 12 +195 val_195 2008-04-09 12 +42 val_42 2008-04-09 12 +321 val_321 2008-04-09 12 +430 val_430 2008-04-09 12 +119 val_119 2008-04-09 12 +489 val_489 2008-04-09 12 +458 val_458 2008-04-09 12 +78 val_78 2008-04-09 12 +76 val_76 2008-04-09 12 +41 val_41 2008-04-09 12 +223 val_223 2008-04-09 12 +492 val_492 2008-04-09 12 +149 val_149 2008-04-09 12 +449 val_449 2008-04-09 12 +218 val_218 2008-04-09 12 +228 val_228 2008-04-09 12 +138 val_138 2008-04-09 12 +453 val_453 2008-04-09 12 +30 val_30 2008-04-09 12 +209 val_209 2008-04-09 12 +64 val_64 2008-04-09 12 +468 val_468 2008-04-09 12 +76 val_76 2008-04-09 12 +74 val_74 2008-04-09 12 +342 val_342 2008-04-09 12 +69 val_69 2008-04-09 12 +230 val_230 2008-04-09 12 +33 val_33 2008-04-09 12 +368 val_368 2008-04-09 12 +103 val_103 2008-04-09 12 +296 val_296 2008-04-09 12 +113 val_113 2008-04-09 12 +216 val_216 2008-04-09 12 +367 val_367 2008-04-09 12 +344 val_344 2008-04-09 12 +167 val_167 2008-04-09 12 +274 val_274 2008-04-09 12 +219 val_219 2008-04-09 12 +239 val_239 2008-04-09 12 +485 val_485 2008-04-09 12 +116 val_116 2008-04-09 12 +223 val_223 2008-04-09 12 +256 val_256 2008-04-09 12 +263 val_263 2008-04-09 12 +70 val_70 2008-04-09 12 +487 val_487 2008-04-09 12 +480 val_480 2008-04-09 12 +401 val_401 2008-04-09 12 +288 val_288 2008-04-09 12 +191 val_191 2008-04-09 12 +5 val_5 2008-04-09 12 +244 val_244 2008-04-09 12 +438 val_438 2008-04-09 12 +128 val_128 2008-04-09 12 
+467 val_467 2008-04-09 12 +432 val_432 2008-04-09 12 +202 val_202 2008-04-09 12 +316 val_316 2008-04-09 12 +229 val_229 2008-04-09 12 +469 val_469 2008-04-09 12 +463 val_463 2008-04-09 12 +280 val_280 2008-04-09 12 +2 val_2 2008-04-09 12 +35 val_35 2008-04-09 12 +283 val_283 2008-04-09 12 +331 val_331 2008-04-09 12 +235 val_235 2008-04-09 12 +80 val_80 2008-04-09 12 +44 val_44 2008-04-09 12 +193 val_193 2008-04-09 12 +321 val_321 2008-04-09 12 +335 val_335 2008-04-09 12 +104 val_104 2008-04-09 12 +466 val_466 2008-04-09 12 +366 val_366 2008-04-09 12 +175 val_175 2008-04-09 12 +403 val_403 2008-04-09 12 +483 val_483 2008-04-09 12 +53 val_53 2008-04-09 12 +105 val_105 2008-04-09 12 +257 val_257 2008-04-09 12 +406 val_406 2008-04-09 12 +409 val_409 2008-04-09 12 +190 val_190 2008-04-09 12 +406 val_406 2008-04-09 12 +401 val_401 2008-04-09 12 +114 val_114 2008-04-09 12 +258 val_258 2008-04-09 12 +90 val_90 2008-04-09 12 +203 val_203 2008-04-09 12 +262 val_262 2008-04-09 12 +348 val_348 2008-04-09 12 +424 val_424 2008-04-09 12 +12 val_12 2008-04-09 12 +396 val_396 2008-04-09 12 +201 val_201 2008-04-09 12 +217 val_217 2008-04-09 12 +164 val_164 2008-04-09 12 +431 val_431 2008-04-09 12 +454 val_454 2008-04-09 12 +478 val_478 2008-04-09 12 +298 val_298 2008-04-09 12 +125 val_125 2008-04-09 12 +431 val_431 2008-04-09 12 +164 val_164 2008-04-09 12 +424 val_424 2008-04-09 12 +187 val_187 2008-04-09 12 +382 val_382 2008-04-09 12 +5 val_5 2008-04-09 12 +70 val_70 2008-04-09 12 +397 val_397 2008-04-09 12 +480 val_480 2008-04-09 12 +291 val_291 2008-04-09 12 +24 val_24 2008-04-09 12 +351 val_351 2008-04-09 12 +255 val_255 2008-04-09 12 +104 val_104 2008-04-09 12 +70 val_70 2008-04-09 12 +163 val_163 2008-04-09 12 +438 val_438 2008-04-09 12 +119 val_119 2008-04-09 12 +414 val_414 2008-04-09 12 +200 val_200 2008-04-09 12 +491 val_491 2008-04-09 12 +237 val_237 2008-04-09 12 +439 val_439 2008-04-09 12 +360 val_360 2008-04-09 12 +248 val_248 2008-04-09 12 +479 val_479 2008-04-09 12 +305 val_305 2008-04-09 12 +417 val_417 2008-04-09 12 +199 val_199 2008-04-09 12 +444 val_444 2008-04-09 12 +120 val_120 2008-04-09 12 +429 val_429 2008-04-09 12 +169 val_169 2008-04-09 12 +443 val_443 2008-04-09 12 +323 val_323 2008-04-09 12 +325 val_325 2008-04-09 12 +277 val_277 2008-04-09 12 +230 val_230 2008-04-09 12 +478 val_478 2008-04-09 12 +178 val_178 2008-04-09 12 +468 val_468 2008-04-09 12 +310 val_310 2008-04-09 12 +317 val_317 2008-04-09 12 +333 val_333 2008-04-09 12 +493 val_493 2008-04-09 12 +460 val_460 2008-04-09 12 +207 val_207 2008-04-09 12 +249 val_249 2008-04-09 12 +265 val_265 2008-04-09 12 +480 val_480 2008-04-09 12 +83 val_83 2008-04-09 12 +136 val_136 2008-04-09 12 +353 val_353 2008-04-09 12 +172 val_172 2008-04-09 12 +214 val_214 2008-04-09 12 +462 val_462 2008-04-09 12 +233 val_233 2008-04-09 12 +406 val_406 2008-04-09 12 +133 val_133 2008-04-09 12 +175 val_175 2008-04-09 12 +189 val_189 2008-04-09 12 +454 val_454 2008-04-09 12 +375 val_375 2008-04-09 12 +401 val_401 2008-04-09 12 +421 val_421 2008-04-09 12 +407 val_407 2008-04-09 12 +384 val_384 2008-04-09 12 +256 val_256 2008-04-09 12 +26 val_26 2008-04-09 12 +134 val_134 2008-04-09 12 +67 val_67 2008-04-09 12 +384 val_384 2008-04-09 12 +379 val_379 2008-04-09 12 +18 val_18 2008-04-09 12 +462 val_462 2008-04-09 12 +492 val_492 2008-04-09 12 +100 val_100 2008-04-09 12 +298 val_298 2008-04-09 12 +9 val_9 2008-04-09 12 +341 val_341 2008-04-09 12 +498 val_498 2008-04-09 12 +146 val_146 2008-04-09 12 +458 val_458 2008-04-09 12 +362 val_362 2008-04-09 12 
+186 val_186 2008-04-09 12 +285 val_285 2008-04-09 12 +348 val_348 2008-04-09 12 +167 val_167 2008-04-09 12 +18 val_18 2008-04-09 12 +273 val_273 2008-04-09 12 +183 val_183 2008-04-09 12 +281 val_281 2008-04-09 12 +344 val_344 2008-04-09 12 +97 val_97 2008-04-09 12 +469 val_469 2008-04-09 12 +315 val_315 2008-04-09 12 +84 val_84 2008-04-09 12 +28 val_28 2008-04-09 12 +37 val_37 2008-04-09 12 +448 val_448 2008-04-09 12 +152 val_152 2008-04-09 12 +348 val_348 2008-04-09 12 +307 val_307 2008-04-09 12 +194 val_194 2008-04-09 12 +414 val_414 2008-04-09 12 +477 val_477 2008-04-09 12 +222 val_222 2008-04-09 12 +126 val_126 2008-04-09 12 +90 val_90 2008-04-09 12 +169 val_169 2008-04-09 12 +403 val_403 2008-04-09 12 +400 val_400 2008-04-09 12 +200 val_200 2008-04-09 12 +97 val_97 2008-04-09 12 Index: ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out (working copy) @@ -0,0 +1,230 @@ +PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin + +explain select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) +PREHOOK: type: QUERY +POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin + +explain select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcpart)) (TOK_TABREF (TOK_TABNAME src)) (= (. (TOK_TABLE_OR_COL srcpart) value) (. (TOK_TABLE_OR_COL src) value))) (TOK_TABREF (TOK_TABNAME src1)) (= (. (TOK_TABLE_OR_COL srcpart) key) (. (TOK_TABLE_OR_COL src1) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. 
(TOK_TABLE_OR_COL srcpart) key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src1 + TableScan + alias: src1 + Reduce Output Operator + key expressions: + expr: key + type: string + sort order: + + Map-reduce partition columns: + expr: key + type: string + tag: 1 + Alias -> Map Operator Tree: + src + TableScan + alias: src + Reduce Output Operator + key expressions: + expr: value + type: string + sort order: + + Map-reduce partition columns: + expr: value + type: string + tag: 1 + Alias -> Map Operator Tree: + srcpart + TableScan + alias: srcpart + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} + 1 + handleSkewJoin: false + keys: + 0 [Column[value]] + 1 [Column[value]] + outputColumnNames: _col0 + Position of Big Table: 0 + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {_col0} + 1 + handleSkewJoin: false + keys: + 0 [Column[_col0]] + 1 [Column[key]] + outputColumnNames: _col0 + Position of Big Table: 0 + Select Operator + expressions: + expr: _col0 + type: string + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: explain select count(*) from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) group by ds +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) group by ds +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcpart)) (TOK_TABREF (TOK_TABNAME src)) (= (. (TOK_TABLE_OR_COL srcpart) value) (. (TOK_TABLE_OR_COL src) value))) (TOK_TABREF (TOK_TABNAME src1)) (= (. (TOK_TABLE_OR_COL srcpart) key) (. 
(TOK_TABLE_OR_COL src1) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTIONSTAR count))) (TOK_GROUPBY (TOK_TABLE_OR_COL ds)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src1 + TableScan + alias: src1 + Reduce Output Operator + key expressions: + expr: key + type: string + sort order: + + Map-reduce partition columns: + expr: key + type: string + tag: 1 + Alias -> Map Operator Tree: + src + TableScan + alias: src + Reduce Output Operator + key expressions: + expr: value + type: string + sort order: + + Map-reduce partition columns: + expr: value + type: string + tag: 1 + Alias -> Map Operator Tree: + srcpart + TableScan + alias: srcpart + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {ds} + 1 + handleSkewJoin: false + keys: + 0 [Column[value]] + 1 [Column[value]] + outputColumnNames: _col0, _col2 + Position of Big Table: 0 + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {_col2} + 1 + handleSkewJoin: false + keys: + 0 [Column[_col0]] + 1 [Column[key]] + outputColumnNames: _col2 + Position of Big Table: 0 + Select Operator + expressions: + expr: _col2 + type: string + outputColumnNames: _col2 + Group By Operator + aggregations: + expr: count() + bucketGroup: false + keys: + expr: _col2 + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col1 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: _col1 + type: bigint + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select count(*) from srcpart join src src on (srcpart.value=src.value) join src src1 on (srcpart.key=src1.key) group by ds +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join src src on (srcpart.value=src.value) join src src1 on (srcpart.key=src1.key) group by ds +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +5308 +5308 Index: ql/src/test/results/clientpositive/tez/mapreduce1.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/mapreduce1.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/mapreduce1.q.out (working 
copy) @@ -0,0 +1,696 @@ +PREHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@dest1 +PREHOOK: query: EXPLAIN +FROM src +INSERT OVERWRITE TABLE dest1 +MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value +USING 'cat' AS (tkey, ten, one, tvalue) +DISTRIBUTE BY tvalue, tkey +SORT BY ten, one +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +FROM src +INSERT OVERWRITE TABLE dest1 +MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value +USING 'cat' AS (tkey, ten, one, tvalue) +DISTRIBUTE BY tvalue, tkey +SORT BY ten, one +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST (. (TOK_TABLE_OR_COL src) key) (TOK_FUNCTION TOK_INT (/ (. (TOK_TABLE_OR_COL src) key) 10)) (TOK_FUNCTION TOK_INT (% (. (TOK_TABLE_OR_COL src) key) 10)) (. (TOK_TABLE_OR_COL src) value)) TOK_SERDE TOK_RECORDWRITER 'cat' TOK_SERDE TOK_RECORDREADER (TOK_ALIASLIST tkey ten one tvalue)))) (TOK_DISTRIBUTEBY (TOK_TABLE_OR_COL tvalue) (TOK_TABLE_OR_COL tkey)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL ten)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL one))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: UDFToInteger((key / 10)) + type: int + expr: UDFToInteger((key % 10)) + type: int + expr: value + type: string + outputColumnNames: _col0, _col1, _col2, _col3 + Transform Operator + command: cat + output info: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reduce Output Operator + key expressions: + expr: _col1 + type: string + expr: _col2 + type: string + sort order: ++ + Map-reduce partition columns: + expr: _col3 + type: string + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + expr: _col2 + type: string + expr: _col3 + type: string + Reduce Operator Tree: + Extract + Select Operator + expressions: + expr: UDFToInteger(_col0) + type: int + expr: UDFToInteger(_col1) + type: int + expr: UDFToInteger(_col2) + type: int + expr: _col3 + type: string + outputColumnNames: _col0, _col1, _col2, _col3 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: 
org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: FROM src +INSERT OVERWRITE TABLE dest1 +MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value +USING 'cat' AS (tkey, ten, one, tvalue) +DISTRIBUTE BY tvalue, tkey +SORT BY ten, one +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest1 +POSTHOOK: query: FROM src +INSERT OVERWRITE TABLE dest1 +MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value +USING 'cat' AS (tkey, ten, one, tvalue) +DISTRIBUTE BY tvalue, tkey +SORT BY ten, one +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dest1 +POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.one SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.ten SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1.* FROM dest1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest1 +#### A masked pattern was here #### +POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.one SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.ten SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +0 0 0 val_0 +0 0 0 val_0 +0 0 0 val_0 +2 0 2 val_2 +4 0 4 val_4 +5 0 5 val_5 +5 0 5 val_5 +5 0 5 val_5 +8 0 8 val_8 +9 0 9 val_9 +10 1 0 val_10 +11 1 1 val_11 +12 1 2 val_12 +12 1 2 val_12 +15 1 5 val_15 +15 1 5 
val_15 +17 1 7 val_17 +18 1 8 val_18 +18 1 8 val_18 +19 1 9 val_19 +100 10 0 val_100 +100 10 0 val_100 +103 10 3 val_103 +103 10 3 val_103 +104 10 4 val_104 +104 10 4 val_104 +105 10 5 val_105 +111 11 1 val_111 +113 11 3 val_113 +113 11 3 val_113 +114 11 4 val_114 +116 11 6 val_116 +118 11 8 val_118 +118 11 8 val_118 +119 11 9 val_119 +119 11 9 val_119 +119 11 9 val_119 +120 12 0 val_120 +120 12 0 val_120 +125 12 5 val_125 +125 12 5 val_125 +126 12 6 val_126 +128 12 8 val_128 +128 12 8 val_128 +128 12 8 val_128 +129 12 9 val_129 +129 12 9 val_129 +131 13 1 val_131 +133 13 3 val_133 +134 13 4 val_134 +134 13 4 val_134 +136 13 6 val_136 +137 13 7 val_137 +137 13 7 val_137 +138 13 8 val_138 +138 13 8 val_138 +138 13 8 val_138 +138 13 8 val_138 +143 14 3 val_143 +145 14 5 val_145 +146 14 6 val_146 +146 14 6 val_146 +149 14 9 val_149 +149 14 9 val_149 +150 15 0 val_150 +152 15 2 val_152 +152 15 2 val_152 +153 15 3 val_153 +155 15 5 val_155 +156 15 6 val_156 +157 15 7 val_157 +158 15 8 val_158 +160 16 0 val_160 +162 16 2 val_162 +163 16 3 val_163 +164 16 4 val_164 +164 16 4 val_164 +165 16 5 val_165 +165 16 5 val_165 +166 16 6 val_166 +167 16 7 val_167 +167 16 7 val_167 +167 16 7 val_167 +168 16 8 val_168 +169 16 9 val_169 +169 16 9 val_169 +169 16 9 val_169 +169 16 9 val_169 +170 17 0 val_170 +172 17 2 val_172 +172 17 2 val_172 +174 17 4 val_174 +174 17 4 val_174 +175 17 5 val_175 +175 17 5 val_175 +176 17 6 val_176 +176 17 6 val_176 +177 17 7 val_177 +178 17 8 val_178 +179 17 9 val_179 +179 17 9 val_179 +180 18 0 val_180 +181 18 1 val_181 +183 18 3 val_183 +186 18 6 val_186 +187 18 7 val_187 +187 18 7 val_187 +187 18 7 val_187 +189 18 9 val_189 +190 19 0 val_190 +191 19 1 val_191 +191 19 1 val_191 +192 19 2 val_192 +193 19 3 val_193 +193 19 3 val_193 +193 19 3 val_193 +194 19 4 val_194 +195 19 5 val_195 +195 19 5 val_195 +196 19 6 val_196 +197 19 7 val_197 +197 19 7 val_197 +199 19 9 val_199 +199 19 9 val_199 +199 19 9 val_199 +20 2 0 val_20 +24 2 4 val_24 +24 2 4 val_24 +26 2 6 val_26 +26 2 6 val_26 +27 2 7 val_27 +28 2 8 val_28 +200 20 0 val_200 +200 20 0 val_200 +201 20 1 val_201 +202 20 2 val_202 +203 20 3 val_203 +203 20 3 val_203 +205 20 5 val_205 +205 20 5 val_205 +207 20 7 val_207 +207 20 7 val_207 +208 20 8 val_208 +208 20 8 val_208 +208 20 8 val_208 +209 20 9 val_209 +209 20 9 val_209 +213 21 3 val_213 +213 21 3 val_213 +214 21 4 val_214 +216 21 6 val_216 +216 21 6 val_216 +217 21 7 val_217 +217 21 7 val_217 +218 21 8 val_218 +219 21 9 val_219 +219 21 9 val_219 +221 22 1 val_221 +221 22 1 val_221 +222 22 2 val_222 +223 22 3 val_223 +223 22 3 val_223 +224 22 4 val_224 +224 22 4 val_224 +226 22 6 val_226 +228 22 8 val_228 +229 22 9 val_229 +229 22 9 val_229 +230 23 0 val_230 +230 23 0 val_230 +230 23 0 val_230 +230 23 0 val_230 +230 23 0 val_230 +233 23 3 val_233 +233 23 3 val_233 +235 23 5 val_235 +237 23 7 val_237 +237 23 7 val_237 +238 23 8 val_238 +238 23 8 val_238 +239 23 9 val_239 +239 23 9 val_239 +241 24 1 val_241 +242 24 2 val_242 +242 24 2 val_242 +244 24 4 val_244 +247 24 7 val_247 +248 24 8 val_248 +249 24 9 val_249 +252 25 2 val_252 +255 25 5 val_255 +255 25 5 val_255 +256 25 6 val_256 +256 25 6 val_256 +257 25 7 val_257 +258 25 8 val_258 +260 26 0 val_260 +262 26 2 val_262 +263 26 3 val_263 +265 26 5 val_265 +265 26 5 val_265 +266 26 6 val_266 +272 27 2 val_272 +272 27 2 val_272 +273 27 3 val_273 +273 27 3 val_273 +273 27 3 val_273 +274 27 4 val_274 +275 27 5 val_275 +277 27 7 val_277 +277 27 7 val_277 +277 27 7 val_277 +277 27 7 val_277 +278 27 8 val_278 +278 27 8 
val_278 +280 28 0 val_280 +280 28 0 val_280 +281 28 1 val_281 +281 28 1 val_281 +282 28 2 val_282 +282 28 2 val_282 +283 28 3 val_283 +284 28 4 val_284 +285 28 5 val_285 +286 28 6 val_286 +287 28 7 val_287 +288 28 8 val_288 +288 28 8 val_288 +289 28 9 val_289 +291 29 1 val_291 +292 29 2 val_292 +296 29 6 val_296 +298 29 8 val_298 +298 29 8 val_298 +298 29 8 val_298 +30 3 0 val_30 +33 3 3 val_33 +34 3 4 val_34 +35 3 5 val_35 +35 3 5 val_35 +35 3 5 val_35 +37 3 7 val_37 +37 3 7 val_37 +302 30 2 val_302 +305 30 5 val_305 +306 30 6 val_306 +307 30 7 val_307 +307 30 7 val_307 +308 30 8 val_308 +309 30 9 val_309 +309 30 9 val_309 +310 31 0 val_310 +311 31 1 val_311 +311 31 1 val_311 +311 31 1 val_311 +315 31 5 val_315 +316 31 6 val_316 +316 31 6 val_316 +316 31 6 val_316 +317 31 7 val_317 +317 31 7 val_317 +318 31 8 val_318 +318 31 8 val_318 +318 31 8 val_318 +321 32 1 val_321 +321 32 1 val_321 +322 32 2 val_322 +322 32 2 val_322 +323 32 3 val_323 +325 32 5 val_325 +325 32 5 val_325 +327 32 7 val_327 +327 32 7 val_327 +327 32 7 val_327 +331 33 1 val_331 +331 33 1 val_331 +332 33 2 val_332 +333 33 3 val_333 +333 33 3 val_333 +335 33 5 val_335 +336 33 6 val_336 +338 33 8 val_338 +339 33 9 val_339 +341 34 1 val_341 +342 34 2 val_342 +342 34 2 val_342 +344 34 4 val_344 +344 34 4 val_344 +345 34 5 val_345 +348 34 8 val_348 +348 34 8 val_348 +348 34 8 val_348 +348 34 8 val_348 +348 34 8 val_348 +351 35 1 val_351 +353 35 3 val_353 +353 35 3 val_353 +356 35 6 val_356 +360 36 0 val_360 +362 36 2 val_362 +364 36 4 val_364 +365 36 5 val_365 +366 36 6 val_366 +367 36 7 val_367 +367 36 7 val_367 +368 36 8 val_368 +369 36 9 val_369 +369 36 9 val_369 +369 36 9 val_369 +373 37 3 val_373 +374 37 4 val_374 +375 37 5 val_375 +377 37 7 val_377 +378 37 8 val_378 +379 37 9 val_379 +382 38 2 val_382 +382 38 2 val_382 +384 38 4 val_384 +384 38 4 val_384 +384 38 4 val_384 +386 38 6 val_386 +389 38 9 val_389 +392 39 2 val_392 +393 39 3 val_393 +394 39 4 val_394 +395 39 5 val_395 +395 39 5 val_395 +396 39 6 val_396 +396 39 6 val_396 +396 39 6 val_396 +397 39 7 val_397 +397 39 7 val_397 +399 39 9 val_399 +399 39 9 val_399 +41 4 1 val_41 +42 4 2 val_42 +42 4 2 val_42 +43 4 3 val_43 +44 4 4 val_44 +47 4 7 val_47 +400 40 0 val_400 +401 40 1 val_401 +401 40 1 val_401 +401 40 1 val_401 +401 40 1 val_401 +401 40 1 val_401 +402 40 2 val_402 +403 40 3 val_403 +403 40 3 val_403 +403 40 3 val_403 +404 40 4 val_404 +404 40 4 val_404 +406 40 6 val_406 +406 40 6 val_406 +406 40 6 val_406 +406 40 6 val_406 +407 40 7 val_407 +409 40 9 val_409 +409 40 9 val_409 +409 40 9 val_409 +411 41 1 val_411 +413 41 3 val_413 +413 41 3 val_413 +414 41 4 val_414 +414 41 4 val_414 +417 41 7 val_417 +417 41 7 val_417 +417 41 7 val_417 +418 41 8 val_418 +419 41 9 val_419 +421 42 1 val_421 +424 42 4 val_424 +424 42 4 val_424 +427 42 7 val_427 +429 42 9 val_429 +429 42 9 val_429 +430 43 0 val_430 +430 43 0 val_430 +430 43 0 val_430 +431 43 1 val_431 +431 43 1 val_431 +431 43 1 val_431 +432 43 2 val_432 +435 43 5 val_435 +436 43 6 val_436 +437 43 7 val_437 +438 43 8 val_438 +438 43 8 val_438 +438 43 8 val_438 +439 43 9 val_439 +439 43 9 val_439 +443 44 3 val_443 +444 44 4 val_444 +446 44 6 val_446 +448 44 8 val_448 +449 44 9 val_449 +452 45 2 val_452 +453 45 3 val_453 +454 45 4 val_454 +454 45 4 val_454 +454 45 4 val_454 +455 45 5 val_455 +457 45 7 val_457 +458 45 8 val_458 +458 45 8 val_458 +459 45 9 val_459 +459 45 9 val_459 +460 46 0 val_460 +462 46 2 val_462 +462 46 2 val_462 +463 46 3 val_463 +463 46 3 val_463 +466 46 6 val_466 +466 46 6 val_466 +466 
46 6 val_466 +467 46 7 val_467 +468 46 8 val_468 +468 46 8 val_468 +468 46 8 val_468 +468 46 8 val_468 +469 46 9 val_469 +469 46 9 val_469 +469 46 9 val_469 +469 46 9 val_469 +469 46 9 val_469 +470 47 0 val_470 +472 47 2 val_472 +475 47 5 val_475 +477 47 7 val_477 +478 47 8 val_478 +478 47 8 val_478 +479 47 9 val_479 +480 48 0 val_480 +480 48 0 val_480 +480 48 0 val_480 +481 48 1 val_481 +482 48 2 val_482 +483 48 3 val_483 +484 48 4 val_484 +485 48 5 val_485 +487 48 7 val_487 +489 48 9 val_489 +489 48 9 val_489 +489 48 9 val_489 +489 48 9 val_489 +490 49 0 val_490 +491 49 1 val_491 +492 49 2 val_492 +492 49 2 val_492 +493 49 3 val_493 +494 49 4 val_494 +495 49 5 val_495 +496 49 6 val_496 +497 49 7 val_497 +498 49 8 val_498 +498 49 8 val_498 +498 49 8 val_498 +51 5 1 val_51 +51 5 1 val_51 +53 5 3 val_53 +54 5 4 val_54 +57 5 7 val_57 +58 5 8 val_58 +58 5 8 val_58 +64 6 4 val_64 +65 6 5 val_65 +66 6 6 val_66 +67 6 7 val_67 +67 6 7 val_67 +69 6 9 val_69 +70 7 0 val_70 +70 7 0 val_70 +70 7 0 val_70 +72 7 2 val_72 +72 7 2 val_72 +74 7 4 val_74 +76 7 6 val_76 +76 7 6 val_76 +77 7 7 val_77 +78 7 8 val_78 +80 8 0 val_80 +82 8 2 val_82 +83 8 3 val_83 +83 8 3 val_83 +84 8 4 val_84 +84 8 4 val_84 +85 8 5 val_85 +86 8 6 val_86 +87 8 7 val_87 +90 9 0 val_90 +90 9 0 val_90 +90 9 0 val_90 +92 9 2 val_92 +95 9 5 val_95 +95 9 5 val_95 +96 9 6 val_96 +97 9 7 val_97 +97 9 7 val_97 +98 9 8 val_98 +98 9 8 val_98 Index: ql/src/test/results/clientpositive/tez/mapreduce2.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/mapreduce2.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/mapreduce2.q.out (working copy) @@ -0,0 +1,687 @@ +PREHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@dest1 +PREHOOK: query: EXPLAIN +FROM src +INSERT OVERWRITE TABLE dest1 +MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value +USING 'cat' AS (tkey, ten, one, tvalue) +DISTRIBUTE BY tvalue, tkey +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +FROM src +INSERT OVERWRITE TABLE dest1 +MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value +USING 'cat' AS (tkey, ten, one, tvalue) +DISTRIBUTE BY tvalue, tkey +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST (. (TOK_TABLE_OR_COL src) key) (TOK_FUNCTION TOK_INT (/ (. (TOK_TABLE_OR_COL src) key) 10)) (TOK_FUNCTION TOK_INT (% (. (TOK_TABLE_OR_COL src) key) 10)) (. 
(TOK_TABLE_OR_COL src) value)) TOK_SERDE TOK_RECORDWRITER 'cat' TOK_SERDE TOK_RECORDREADER (TOK_ALIASLIST tkey ten one tvalue)))) (TOK_DISTRIBUTEBY (TOK_TABLE_OR_COL tvalue) (TOK_TABLE_OR_COL tkey)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: UDFToInteger((key / 10)) + type: int + expr: UDFToInteger((key % 10)) + type: int + expr: value + type: string + outputColumnNames: _col0, _col1, _col2, _col3 + Transform Operator + command: cat + output info: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reduce Output Operator + sort order: + Map-reduce partition columns: + expr: _col3 + type: string + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + expr: _col2 + type: string + expr: _col3 + type: string + Reduce Operator Tree: + Extract + Select Operator + expressions: + expr: UDFToInteger(_col0) + type: int + expr: UDFToInteger(_col1) + type: int + expr: UDFToInteger(_col2) + type: int + expr: _col3 + type: string + outputColumnNames: _col0, _col1, _col2, _col3 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: FROM src +INSERT OVERWRITE TABLE dest1 +MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value +USING 'cat' AS (tkey, ten, one, tvalue) +DISTRIBUTE BY tvalue, tkey +PREHOOK: type: QUERY 
+PREHOOK: Input: default@src +PREHOOK: Output: default@dest1 +POSTHOOK: query: FROM src +INSERT OVERWRITE TABLE dest1 +MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value +USING 'cat' AS (tkey, ten, one, tvalue) +DISTRIBUTE BY tvalue, tkey +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dest1 +POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.one SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.ten SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT * FROM (SELECT dest1.* FROM dest1 DISTRIBUTE BY key SORT BY key, ten, one, value) T +PREHOOK: type: QUERY +PREHOOK: Input: default@dest1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM (SELECT dest1.* FROM dest1 DISTRIBUTE BY key SORT BY key, ten, one, value) T +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest1 +#### A masked pattern was here #### +POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.one SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.ten SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +0 0 0 val_0 +0 0 0 val_0 +0 0 0 val_0 +2 0 2 val_2 +4 0 4 val_4 +5 0 5 val_5 +5 0 5 val_5 +5 0 5 val_5 +8 0 8 val_8 +9 0 9 val_9 +10 1 0 val_10 +11 1 1 val_11 +12 1 2 val_12 +12 1 2 val_12 +15 1 5 val_15 +15 1 5 val_15 +17 1 7 val_17 +18 1 8 val_18 +18 1 8 val_18 +19 1 9 val_19 +20 2 0 val_20 +24 2 4 val_24 +24 2 4 val_24 +26 2 6 val_26 +26 2 6 val_26 +27 2 7 val_27 +28 2 8 val_28 +30 3 0 val_30 +33 3 3 val_33 +34 3 4 val_34 +35 3 5 val_35 +35 3 5 val_35 +35 3 5 val_35 +37 3 7 val_37 +37 3 7 val_37 +41 4 1 val_41 +42 4 2 val_42 +42 4 2 val_42 +43 4 3 val_43 +44 4 4 val_44 +47 4 7 val_47 +51 5 1 val_51 +51 5 1 val_51 +53 5 3 val_53 +54 5 4 val_54 +57 5 7 val_57 +58 5 8 val_58 +58 5 8 val_58 +64 6 4 val_64 +65 6 5 val_65 +66 6 6 val_66 +67 6 7 val_67 +67 6 7 val_67 +69 6 9 val_69 +70 7 0 val_70 +70 7 0 val_70 +70 7 0 val_70 +72 7 2 val_72 +72 7 2 val_72 +74 7 4 val_74 +76 7 6 val_76 +76 7 6 val_76 +77 7 7 val_77 +78 7 8 val_78 +80 8 0 val_80 +82 8 2 val_82 +83 8 3 val_83 +83 8 3 val_83 +84 8 4 val_84 +84 8 4 val_84 +85 8 5 val_85 +86 8 6 val_86 +87 8 7 val_87 +90 9 0 val_90 +90 9 0 val_90 +90 9 0 val_90 +92 9 2 val_92 +95 9 5 val_95 +95 9 5 val_95 +96 9 6 val_96 +97 9 7 val_97 +97 9 7 val_97 +98 9 8 val_98 +98 9 8 val_98 +100 10 0 val_100 +100 10 0 val_100 +103 10 3 val_103 +103 10 3 val_103 +104 10 4 val_104 +104 10 4 val_104 +105 10 5 val_105 +111 11 1 val_111 +113 11 3 val_113 +113 11 3 val_113 +114 11 4 val_114 +116 11 6 val_116 +118 11 8 
val_118 +118 11 8 val_118 +119 11 9 val_119 +119 11 9 val_119 +119 11 9 val_119 +120 12 0 val_120 +120 12 0 val_120 +125 12 5 val_125 +125 12 5 val_125 +126 12 6 val_126 +128 12 8 val_128 +128 12 8 val_128 +128 12 8 val_128 +129 12 9 val_129 +129 12 9 val_129 +131 13 1 val_131 +133 13 3 val_133 +134 13 4 val_134 +134 13 4 val_134 +136 13 6 val_136 +137 13 7 val_137 +137 13 7 val_137 +138 13 8 val_138 +138 13 8 val_138 +138 13 8 val_138 +138 13 8 val_138 +143 14 3 val_143 +145 14 5 val_145 +146 14 6 val_146 +146 14 6 val_146 +149 14 9 val_149 +149 14 9 val_149 +150 15 0 val_150 +152 15 2 val_152 +152 15 2 val_152 +153 15 3 val_153 +155 15 5 val_155 +156 15 6 val_156 +157 15 7 val_157 +158 15 8 val_158 +160 16 0 val_160 +162 16 2 val_162 +163 16 3 val_163 +164 16 4 val_164 +164 16 4 val_164 +165 16 5 val_165 +165 16 5 val_165 +166 16 6 val_166 +167 16 7 val_167 +167 16 7 val_167 +167 16 7 val_167 +168 16 8 val_168 +169 16 9 val_169 +169 16 9 val_169 +169 16 9 val_169 +169 16 9 val_169 +170 17 0 val_170 +172 17 2 val_172 +172 17 2 val_172 +174 17 4 val_174 +174 17 4 val_174 +175 17 5 val_175 +175 17 5 val_175 +176 17 6 val_176 +176 17 6 val_176 +177 17 7 val_177 +178 17 8 val_178 +179 17 9 val_179 +179 17 9 val_179 +180 18 0 val_180 +181 18 1 val_181 +183 18 3 val_183 +186 18 6 val_186 +187 18 7 val_187 +187 18 7 val_187 +187 18 7 val_187 +189 18 9 val_189 +190 19 0 val_190 +191 19 1 val_191 +191 19 1 val_191 +192 19 2 val_192 +193 19 3 val_193 +193 19 3 val_193 +193 19 3 val_193 +194 19 4 val_194 +195 19 5 val_195 +195 19 5 val_195 +196 19 6 val_196 +197 19 7 val_197 +197 19 7 val_197 +199 19 9 val_199 +199 19 9 val_199 +199 19 9 val_199 +200 20 0 val_200 +200 20 0 val_200 +201 20 1 val_201 +202 20 2 val_202 +203 20 3 val_203 +203 20 3 val_203 +205 20 5 val_205 +205 20 5 val_205 +207 20 7 val_207 +207 20 7 val_207 +208 20 8 val_208 +208 20 8 val_208 +208 20 8 val_208 +209 20 9 val_209 +209 20 9 val_209 +213 21 3 val_213 +213 21 3 val_213 +214 21 4 val_214 +216 21 6 val_216 +216 21 6 val_216 +217 21 7 val_217 +217 21 7 val_217 +218 21 8 val_218 +219 21 9 val_219 +219 21 9 val_219 +221 22 1 val_221 +221 22 1 val_221 +222 22 2 val_222 +223 22 3 val_223 +223 22 3 val_223 +224 22 4 val_224 +224 22 4 val_224 +226 22 6 val_226 +228 22 8 val_228 +229 22 9 val_229 +229 22 9 val_229 +230 23 0 val_230 +230 23 0 val_230 +230 23 0 val_230 +230 23 0 val_230 +230 23 0 val_230 +233 23 3 val_233 +233 23 3 val_233 +235 23 5 val_235 +237 23 7 val_237 +237 23 7 val_237 +238 23 8 val_238 +238 23 8 val_238 +239 23 9 val_239 +239 23 9 val_239 +241 24 1 val_241 +242 24 2 val_242 +242 24 2 val_242 +244 24 4 val_244 +247 24 7 val_247 +248 24 8 val_248 +249 24 9 val_249 +252 25 2 val_252 +255 25 5 val_255 +255 25 5 val_255 +256 25 6 val_256 +256 25 6 val_256 +257 25 7 val_257 +258 25 8 val_258 +260 26 0 val_260 +262 26 2 val_262 +263 26 3 val_263 +265 26 5 val_265 +265 26 5 val_265 +266 26 6 val_266 +272 27 2 val_272 +272 27 2 val_272 +273 27 3 val_273 +273 27 3 val_273 +273 27 3 val_273 +274 27 4 val_274 +275 27 5 val_275 +277 27 7 val_277 +277 27 7 val_277 +277 27 7 val_277 +277 27 7 val_277 +278 27 8 val_278 +278 27 8 val_278 +280 28 0 val_280 +280 28 0 val_280 +281 28 1 val_281 +281 28 1 val_281 +282 28 2 val_282 +282 28 2 val_282 +283 28 3 val_283 +284 28 4 val_284 +285 28 5 val_285 +286 28 6 val_286 +287 28 7 val_287 +288 28 8 val_288 +288 28 8 val_288 +289 28 9 val_289 +291 29 1 val_291 +292 29 2 val_292 +296 29 6 val_296 +298 29 8 val_298 +298 29 8 val_298 +298 29 8 val_298 +302 30 2 val_302 +305 30 5 val_305 
+306 30 6 val_306 +307 30 7 val_307 +307 30 7 val_307 +308 30 8 val_308 +309 30 9 val_309 +309 30 9 val_309 +310 31 0 val_310 +311 31 1 val_311 +311 31 1 val_311 +311 31 1 val_311 +315 31 5 val_315 +316 31 6 val_316 +316 31 6 val_316 +316 31 6 val_316 +317 31 7 val_317 +317 31 7 val_317 +318 31 8 val_318 +318 31 8 val_318 +318 31 8 val_318 +321 32 1 val_321 +321 32 1 val_321 +322 32 2 val_322 +322 32 2 val_322 +323 32 3 val_323 +325 32 5 val_325 +325 32 5 val_325 +327 32 7 val_327 +327 32 7 val_327 +327 32 7 val_327 +331 33 1 val_331 +331 33 1 val_331 +332 33 2 val_332 +333 33 3 val_333 +333 33 3 val_333 +335 33 5 val_335 +336 33 6 val_336 +338 33 8 val_338 +339 33 9 val_339 +341 34 1 val_341 +342 34 2 val_342 +342 34 2 val_342 +344 34 4 val_344 +344 34 4 val_344 +345 34 5 val_345 +348 34 8 val_348 +348 34 8 val_348 +348 34 8 val_348 +348 34 8 val_348 +348 34 8 val_348 +351 35 1 val_351 +353 35 3 val_353 +353 35 3 val_353 +356 35 6 val_356 +360 36 0 val_360 +362 36 2 val_362 +364 36 4 val_364 +365 36 5 val_365 +366 36 6 val_366 +367 36 7 val_367 +367 36 7 val_367 +368 36 8 val_368 +369 36 9 val_369 +369 36 9 val_369 +369 36 9 val_369 +373 37 3 val_373 +374 37 4 val_374 +375 37 5 val_375 +377 37 7 val_377 +378 37 8 val_378 +379 37 9 val_379 +382 38 2 val_382 +382 38 2 val_382 +384 38 4 val_384 +384 38 4 val_384 +384 38 4 val_384 +386 38 6 val_386 +389 38 9 val_389 +392 39 2 val_392 +393 39 3 val_393 +394 39 4 val_394 +395 39 5 val_395 +395 39 5 val_395 +396 39 6 val_396 +396 39 6 val_396 +396 39 6 val_396 +397 39 7 val_397 +397 39 7 val_397 +399 39 9 val_399 +399 39 9 val_399 +400 40 0 val_400 +401 40 1 val_401 +401 40 1 val_401 +401 40 1 val_401 +401 40 1 val_401 +401 40 1 val_401 +402 40 2 val_402 +403 40 3 val_403 +403 40 3 val_403 +403 40 3 val_403 +404 40 4 val_404 +404 40 4 val_404 +406 40 6 val_406 +406 40 6 val_406 +406 40 6 val_406 +406 40 6 val_406 +407 40 7 val_407 +409 40 9 val_409 +409 40 9 val_409 +409 40 9 val_409 +411 41 1 val_411 +413 41 3 val_413 +413 41 3 val_413 +414 41 4 val_414 +414 41 4 val_414 +417 41 7 val_417 +417 41 7 val_417 +417 41 7 val_417 +418 41 8 val_418 +419 41 9 val_419 +421 42 1 val_421 +424 42 4 val_424 +424 42 4 val_424 +427 42 7 val_427 +429 42 9 val_429 +429 42 9 val_429 +430 43 0 val_430 +430 43 0 val_430 +430 43 0 val_430 +431 43 1 val_431 +431 43 1 val_431 +431 43 1 val_431 +432 43 2 val_432 +435 43 5 val_435 +436 43 6 val_436 +437 43 7 val_437 +438 43 8 val_438 +438 43 8 val_438 +438 43 8 val_438 +439 43 9 val_439 +439 43 9 val_439 +443 44 3 val_443 +444 44 4 val_444 +446 44 6 val_446 +448 44 8 val_448 +449 44 9 val_449 +452 45 2 val_452 +453 45 3 val_453 +454 45 4 val_454 +454 45 4 val_454 +454 45 4 val_454 +455 45 5 val_455 +457 45 7 val_457 +458 45 8 val_458 +458 45 8 val_458 +459 45 9 val_459 +459 45 9 val_459 +460 46 0 val_460 +462 46 2 val_462 +462 46 2 val_462 +463 46 3 val_463 +463 46 3 val_463 +466 46 6 val_466 +466 46 6 val_466 +466 46 6 val_466 +467 46 7 val_467 +468 46 8 val_468 +468 46 8 val_468 +468 46 8 val_468 +468 46 8 val_468 +469 46 9 val_469 +469 46 9 val_469 +469 46 9 val_469 +469 46 9 val_469 +469 46 9 val_469 +470 47 0 val_470 +472 47 2 val_472 +475 47 5 val_475 +477 47 7 val_477 +478 47 8 val_478 +478 47 8 val_478 +479 47 9 val_479 +480 48 0 val_480 +480 48 0 val_480 +480 48 0 val_480 +481 48 1 val_481 +482 48 2 val_482 +483 48 3 val_483 +484 48 4 val_484 +485 48 5 val_485 +487 48 7 val_487 +489 48 9 val_489 +489 48 9 val_489 +489 48 9 val_489 +489 48 9 val_489 +490 49 0 val_490 +491 49 1 val_491 +492 49 2 val_492 +492 49 
2 val_492 +493 49 3 val_493 +494 49 4 val_494 +495 49 5 val_495 +496 49 6 val_496 +497 49 7 val_497 +498 49 8 val_498 +498 49 8 val_498 +498 49 8 val_498 Index: ql/src/test/results/clientpositive/tez/merge1.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/merge1.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/merge1.q.out (working copy) @@ -0,0 +1,778 @@ +PREHOOK: query: create table dest1(key int, val int) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table dest1(key int, val int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@dest1 +PREHOOK: query: explain +insert overwrite table dest1 +select key, count(1) from src group by key +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table dest1 +select key, count(1) from src group by key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + outputColumnNames: key + Group By Operator + aggregations: + expr: count(1) + bucketGroup: false + keys: + expr: key + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col1 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: UDFToInteger(_col0) + type: int + expr: UDFToInteger(_col1) + type: int + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: 
default.dest1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: insert overwrite table dest1 +select key, count(1) from src group by key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest1 +POSTHOOK: query: insert overwrite table dest1 +select key, count(1) from src group by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dest1 +POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.val EXPRESSION [(src)src.null, ] +PREHOOK: query: select * from dest1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest1 +#### A masked pattern was here #### +POSTHOOK: query: select * from dest1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest1 +#### A masked pattern was here #### +POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.val EXPRESSION [(src)src.null, ] +0 3 +10 1 +100 2 +103 2 +104 2 +105 1 +11 1 +111 1 +113 2 +114 1 +116 1 +118 2 +119 3 +12 2 +120 2 +125 2 +126 1 +128 3 +129 2 +131 1 +133 1 +134 2 +136 1 +137 2 +138 4 +143 1 +145 1 +146 2 +149 2 +15 2 +150 1 +152 2 +153 1 +155 1 +156 1 +157 1 +158 1 +160 1 +162 1 +163 1 +164 2 +165 2 +166 1 +167 3 +168 1 +169 4 +17 1 +170 1 +172 2 +174 2 +175 2 +176 2 +177 1 +178 1 +179 2 +18 2 +180 1 +181 1 +183 1 +186 1 +187 3 +189 1 +19 1 +190 1 +191 2 +192 1 +193 3 +194 1 +195 2 +196 1 +197 2 +199 3 +2 1 +20 1 +200 2 +201 1 +202 1 +203 2 +205 2 +207 2 +208 3 +209 2 +213 2 +214 1 +216 2 +217 2 +218 1 +219 2 +221 2 +222 1 +223 2 +224 2 +226 1 +228 1 +229 2 +230 5 +233 2 +235 1 +237 2 +238 2 +239 2 +24 2 +241 1 +242 2 +244 1 +247 1 +248 1 +249 1 +252 1 +255 2 +256 2 +257 1 +258 1 +26 2 +260 1 +262 1 +263 1 +265 2 +266 1 +27 1 +272 2 +273 3 +274 1 +275 1 +277 4 +278 2 +28 1 +280 2 +281 2 +282 2 +283 1 +284 1 +285 1 +286 1 +287 1 +288 2 +289 1 +291 1 +292 1 +296 1 +298 3 +30 1 +302 1 +305 1 +306 1 +307 2 +308 1 +309 2 +310 1 +311 3 +315 1 +316 3 +317 2 +318 3 +321 2 +322 2 +323 1 +325 2 +327 3 +33 1 +331 2 +332 1 +333 2 +335 1 +336 1 +338 1 +339 1 +34 1 +341 1 +342 2 +344 2 +345 1 +348 5 +35 3 +351 1 +353 2 +356 1 +360 1 +362 1 +364 1 +365 1 +366 1 +367 2 +368 1 +369 3 +37 2 +373 1 +374 1 +375 1 +377 1 +378 1 +379 1 +382 2 +384 3 +386 1 +389 1 +392 1 +393 1 +394 1 +395 2 +396 3 +397 2 +399 2 +4 1 +400 1 +401 5 +402 1 +403 3 +404 2 +406 4 +407 1 +409 3 +41 1 +411 1 +413 2 +414 2 +417 3 +418 1 +419 1 +42 2 +421 1 +424 2 +427 1 +429 2 +43 1 +430 3 +431 3 +432 1 +435 1 +436 1 +437 1 +438 3 +439 2 +44 1 +443 1 +444 1 +446 1 +448 1 +449 1 +452 1 +453 1 +454 3 +455 1 +457 1 +458 2 +459 2 +460 1 +462 2 +463 2 +466 3 +467 1 +468 4 +469 5 +47 1 +470 1 +472 1 +475 1 +477 1 +478 2 +479 1 +480 3 +481 1 +482 1 +483 1 +484 1 +485 1 +487 1 +489 4 +490 1 +491 1 +492 2 +493 1 +494 1 +495 1 +496 1 +497 1 +498 3 +5 3 +51 2 +53 1 +54 1 +57 1 +58 2 +64 1 +65 1 +66 1 +67 2 +69 1 +70 3 +72 2 +74 1 +76 2 +77 1 +78 1 +8 1 +80 1 +82 1 +83 2 +84 2 +85 1 +86 1 +87 1 +9 1 +90 3 +92 1 +95 2 +96 1 +97 2 +98 2 
+PREHOOK: query: drop table dest1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@dest1 +PREHOOK: Output: default@dest1 +POSTHOOK: query: drop table dest1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@dest1 +POSTHOOK: Output: default@dest1 +POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.val EXPRESSION [(src)src.null, ] +PREHOOK: query: create table test_src(key string, value string) partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table test_src(key string, value string) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@test_src +POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.val EXPRESSION [(src)src.null, ] +PREHOOK: query: create table dest1(key string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table dest1(key string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@dest1 +POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.val EXPRESSION [(src)src.null, ] +PREHOOK: query: insert overwrite table test_src partition(ds='101') select * from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@test_src@ds=101 +POSTHOOK: query: insert overwrite table test_src partition(ds='101') select * from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@test_src@ds=101 +POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.val EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table test_src partition(ds='102') select * from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@test_src@ds=102 +POSTHOOK: query: insert overwrite table test_src partition(ds='102') select * from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@test_src@ds=102 +POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.val EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain +insert overwrite table dest1 select key from test_src +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table dest1 select key from test_src +POSTHOOK: type: QUERY +POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.val EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src 
PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME test_src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + test_src + TableScan + alias: test_src + Select Operator + expressions: + expr: key + type: string + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: insert overwrite table dest1 select key from test_src +PREHOOK: type: QUERY +PREHOOK: Input: default@test_src +PREHOOK: Input: default@test_src@ds=101 +PREHOOK: Input: default@test_src@ds=102 +PREHOOK: Output: default@dest1 +POSTHOOK: query: insert overwrite table dest1 select key from test_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_src +POSTHOOK: Input: default@test_src@ds=101 +POSTHOOK: Input: default@test_src@ds=102 +POSTHOOK: Output: default@dest1 +POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.key SIMPLE [(test_src)test_src.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: dest1.val EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: test_src 
PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain +insert overwrite table dest1 select key from test_src +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table dest1 select key from test_src +POSTHOOK: type: QUERY +POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.key SIMPLE [(test_src)test_src.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: dest1.val EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME test_src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + test_src + TableScan + alias: test_src + Select Operator + expressions: + expr: key + type: string + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: 
org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: insert overwrite table dest1 select key from test_src +PREHOOK: type: QUERY +PREHOOK: Input: default@test_src +PREHOOK: Input: default@test_src@ds=101 +PREHOOK: Input: default@test_src@ds=102 +PREHOOK: Output: default@dest1 +POSTHOOK: query: insert overwrite table dest1 select key from test_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_src +POSTHOOK: Input: default@test_src@ds=101 +POSTHOOK: Input: default@test_src@ds=102 +POSTHOOK: Output: default@dest1 +POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.key SIMPLE [(test_src)test_src.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: dest1.key SIMPLE [(test_src)test_src.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: dest1.val EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Index: ql/src/test/results/clientpositive/tez/merge2.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/merge2.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/merge2.q.out (working copy) @@ -0,0 +1,778 @@ +PREHOOK: query: create table test1(key int, val int) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table test1(key int, val int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@test1 +PREHOOK: query: explain +insert overwrite table test1 +select key, count(1) from src group by key +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table test1 +select key, count(1) from src group by key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME test1))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + outputColumnNames: key + Group By Operator + aggregations: + expr: count(1) + bucketGroup: false + keys: + expr: key + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col1 + type: bigint + Reduce 
Operator Tree: + Group By Operator + aggregations: + expr: count(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: UDFToInteger(_col0) + type: int + expr: UDFToInteger(_col1) + type: int + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test1 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: insert overwrite table test1 +select key, count(1) from src group by key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@test1 +POSTHOOK: query: insert overwrite table test1 +select key, count(1) from src group by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@test1 +POSTHOOK: Lineage: test1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test1.val EXPRESSION [(src)src.null, ] +PREHOOK: query: select * from test1 +PREHOOK: type: QUERY +PREHOOK: Input: default@test1 +#### A masked pattern was here #### +POSTHOOK: query: select * from test1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test1 +#### A masked pattern was here #### +POSTHOOK: Lineage: test1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test1.val EXPRESSION [(src)src.null, ] +0 3 +10 1 +100 2 +103 2 +104 2 +105 1 +11 1 +111 1 +113 2 +114 1 +116 1 +118 2 +119 3 +12 2 +120 2 +125 2 +126 1 +128 3 +129 2 +131 1 +133 1 +134 2 +136 1 +137 2 +138 4 +143 1 +145 1 +146 2 +149 2 +15 2 +150 1 +152 2 +153 1 +155 1 +156 1 +157 1 +158 1 +160 1 +162 1 +163 1 +164 2 +165 2 +166 1 +167 3 +168 1 +169 4 +17 1 +170 1 +172 2 +174 2 +175 2 +176 2 +177 1 +178 1 +179 2 +18 2 +180 1 +181 1 +183 1 +186 1 +187 3 +189 1 +19 1 +190 1 +191 2 +192 1 +193 3 +194 1 +195 2 +196 1 +197 2 +199 3 +2 1 +20 1 +200 2 +201 1 +202 1 +203 2 +205 2 +207 2 +208 3 +209 2 +213 2 +214 1 +216 2 +217 
2 +218 1 +219 2 +221 2 +222 1 +223 2 +224 2 +226 1 +228 1 +229 2 +230 5 +233 2 +235 1 +237 2 +238 2 +239 2 +24 2 +241 1 +242 2 +244 1 +247 1 +248 1 +249 1 +252 1 +255 2 +256 2 +257 1 +258 1 +26 2 +260 1 +262 1 +263 1 +265 2 +266 1 +27 1 +272 2 +273 3 +274 1 +275 1 +277 4 +278 2 +28 1 +280 2 +281 2 +282 2 +283 1 +284 1 +285 1 +286 1 +287 1 +288 2 +289 1 +291 1 +292 1 +296 1 +298 3 +30 1 +302 1 +305 1 +306 1 +307 2 +308 1 +309 2 +310 1 +311 3 +315 1 +316 3 +317 2 +318 3 +321 2 +322 2 +323 1 +325 2 +327 3 +33 1 +331 2 +332 1 +333 2 +335 1 +336 1 +338 1 +339 1 +34 1 +341 1 +342 2 +344 2 +345 1 +348 5 +35 3 +351 1 +353 2 +356 1 +360 1 +362 1 +364 1 +365 1 +366 1 +367 2 +368 1 +369 3 +37 2 +373 1 +374 1 +375 1 +377 1 +378 1 +379 1 +382 2 +384 3 +386 1 +389 1 +392 1 +393 1 +394 1 +395 2 +396 3 +397 2 +399 2 +4 1 +400 1 +401 5 +402 1 +403 3 +404 2 +406 4 +407 1 +409 3 +41 1 +411 1 +413 2 +414 2 +417 3 +418 1 +419 1 +42 2 +421 1 +424 2 +427 1 +429 2 +43 1 +430 3 +431 3 +432 1 +435 1 +436 1 +437 1 +438 3 +439 2 +44 1 +443 1 +444 1 +446 1 +448 1 +449 1 +452 1 +453 1 +454 3 +455 1 +457 1 +458 2 +459 2 +460 1 +462 2 +463 2 +466 3 +467 1 +468 4 +469 5 +47 1 +470 1 +472 1 +475 1 +477 1 +478 2 +479 1 +480 3 +481 1 +482 1 +483 1 +484 1 +485 1 +487 1 +489 4 +490 1 +491 1 +492 2 +493 1 +494 1 +495 1 +496 1 +497 1 +498 3 +5 3 +51 2 +53 1 +54 1 +57 1 +58 2 +64 1 +65 1 +66 1 +67 2 +69 1 +70 3 +72 2 +74 1 +76 2 +77 1 +78 1 +8 1 +80 1 +82 1 +83 2 +84 2 +85 1 +86 1 +87 1 +9 1 +90 3 +92 1 +95 2 +96 1 +97 2 +98 2 +PREHOOK: query: drop table test1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@test1 +PREHOOK: Output: default@test1 +POSTHOOK: query: drop table test1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@test1 +POSTHOOK: Output: default@test1 +POSTHOOK: Lineage: test1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test1.val EXPRESSION [(src)src.null, ] +PREHOOK: query: create table test_src(key string, value string) partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table test_src(key string, value string) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@test_src +POSTHOOK: Lineage: test1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test1.val EXPRESSION [(src)src.null, ] +PREHOOK: query: create table test1(key string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table test1(key string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@test1 +POSTHOOK: Lineage: test1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test1.val EXPRESSION [(src)src.null, ] +PREHOOK: query: insert overwrite table test_src partition(ds='101') select * from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@test_src@ds=101 +POSTHOOK: query: insert overwrite table test_src partition(ds='101') select * from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@test_src@ds=101 +POSTHOOK: Lineage: test1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test1.val EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table test_src 
partition(ds='102') select * from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@test_src@ds=102 +POSTHOOK: query: insert overwrite table test_src partition(ds='102') select * from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@test_src@ds=102 +POSTHOOK: Lineage: test1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test1.val EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain +insert overwrite table test1 select key from test_src +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table test1 select key from test_src +POSTHOOK: type: QUERY +POSTHOOK: Lineage: test1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test1.val EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME test_src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME test1))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + test_src + TableScan + alias: test_src + Select Operator + expressions: + expr: key + type: string + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test1 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input 
format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: insert overwrite table test1 select key from test_src +PREHOOK: type: QUERY +PREHOOK: Input: default@test_src +PREHOOK: Input: default@test_src@ds=101 +PREHOOK: Input: default@test_src@ds=102 +PREHOOK: Output: default@test1 +POSTHOOK: query: insert overwrite table test1 select key from test_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_src +POSTHOOK: Input: default@test_src@ds=101 +POSTHOOK: Input: default@test_src@ds=102 +POSTHOOK: Output: default@test1 +POSTHOOK: Lineage: test1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test1.key SIMPLE [(test_src)test_src.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: test1.val EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain +insert overwrite table test1 select key from test_src +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table test1 select key from test_src +POSTHOOK: type: QUERY +POSTHOOK: Lineage: test1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test1.key SIMPLE [(test_src)test_src.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: test1.val EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME test_src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME test1))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + test_src + TableScan 
+ alias: test_src + Select Operator + expressions: + expr: key + type: string + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test1 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test1 + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test1 + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: insert overwrite table test1 select key from test_src +PREHOOK: type: QUERY +PREHOOK: Input: default@test_src +PREHOOK: Input: default@test_src@ds=101 +PREHOOK: Input: default@test_src@ds=102 +PREHOOK: Output: default@test1 +POSTHOOK: query: insert overwrite table test1 select key from test_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_src +POSTHOOK: Input: default@test_src@ds=101 +POSTHOOK: Input: default@test_src@ds=102 +POSTHOOK: Output: default@test1 +POSTHOOK: Lineage: test1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test1.key SIMPLE [(test_src)test_src.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: test1.key SIMPLE [(test_src)test_src.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: test1.val EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src PARTITION(ds=102).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Index: ql/src/test/results/clientpositive/tez/metadata_only_queries.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/metadata_only_queries.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/metadata_only_queries.q.out (working copy) @@ -0,0 +1,1426 @@ +PREHOOK: query: create table over10k( + t tinyint, + si 
smallint, + i int, + b bigint, + f float, + d double, + bo boolean, + s string, + ts timestamp, + dec decimal, + bin binary) + row format delimited + fields terminated by '|' +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table over10k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + bo boolean, + s string, + ts timestamp, + dec decimal, + bin binary) + row format delimited + fields terminated by '|' +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@over10k +PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +PREHOOK: type: LOAD +PREHOOK: Output: default@over10k +POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +POSTHOOK: type: LOAD +POSTHOOK: Output: default@over10k +PREHOOK: query: create table stats_tbl( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + bo boolean, + s string, + ts timestamp, + dec decimal, + bin binary) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table stats_tbl( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + bo boolean, + s string, + ts timestamp, + dec decimal, + bin binary) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@stats_tbl +PREHOOK: query: create table stats_tbl_part( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + bo boolean, + s string, + ts timestamp, + dec decimal, + bin binary) partitioned by (dt string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table stats_tbl_part( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + bo boolean, + s string, + ts timestamp, + dec decimal, + bin binary) partitioned by (dt string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@stats_tbl_part +PREHOOK: query: insert overwrite table stats_tbl select * from over10k +PREHOOK: type: QUERY +PREHOOK: Input: default@over10k +PREHOOK: Output: default@stats_tbl +POSTHOOK: query: insert overwrite table stats_tbl select * from over10k +POSTHOOK: type: QUERY +POSTHOOK: Input: default@over10k +POSTHOOK: Output: default@stats_tbl +POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl.d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl.s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +PREHOOK: query: insert into table stats_tbl_part partition (dt='2010') select * from over10k where t>0 and t<30 +PREHOOK: type: QUERY +PREHOOK: Input: default@over10k +PREHOOK: Output: default@stats_tbl_part@dt=2010 +POSTHOOK: query: insert 
into table stats_tbl_part partition (dt='2010') select * from over10k where t>0 and t<30 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@over10k +POSTHOOK: Output: default@stats_tbl_part@dt=2010 +POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl.d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl.s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +PREHOOK: query: insert into table stats_tbl_part partition (dt='2011') select * from over10k where t>30 and t<60 +PREHOOK: type: QUERY +PREHOOK: Input: default@over10k +PREHOOK: Output: default@stats_tbl_part@dt=2011 +POSTHOOK: query: insert into table stats_tbl_part partition (dt='2011') select * from over10k where t>30 and t<60 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@over10k +POSTHOOK: Output: default@stats_tbl_part@dt=2011 +POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bo 
SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl.d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl.s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).si SIMPLE 
[(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +PREHOOK: query: insert into table stats_tbl_part partition (dt='2012') select * from over10k where t>60 +PREHOOK: type: QUERY +PREHOOK: Input: default@over10k +PREHOOK: Output: default@stats_tbl_part@dt=2012 +POSTHOOK: query: insert into table stats_tbl_part partition (dt='2012') select * from over10k where t>60 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@over10k +POSTHOOK: Output: default@stats_tbl_part@dt=2012 +POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl.d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl.s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, 
comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +PREHOOK: query: explain +select count(*), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b) from stats_tbl +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b) from stats_tbl +POSTHOOK: type: QUERY +POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl.d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] 
+POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl.s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, 
comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME stats_tbl))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTIONSTAR count)) (TOK_SELEXPR (TOK_FUNCTION count 1)) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL s))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL bo))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL bin))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL si))) (TOK_SELEXPR (TOK_FUNCTION max (TOK_TABLE_OR_COL i))) (TOK_SELEXPR (TOK_FUNCTION min (TOK_TABLE_OR_COL b)))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + stats_tbl + TableScan + alias: stats_tbl + Select Operator + expressions: + expr: s + type: string + expr: bo + type: boolean + expr: bin + type: binary + expr: si + type: smallint + expr: i + type: int + expr: b + type: bigint + outputColumnNames: s, bo, bin, si, i, b + Group By Operator + aggregations: + expr: count() + expr: count(1) + expr: count(s) + expr: count(bo) + expr: count(bin) + expr: count(si) + expr: max(i) + expr: min(b) + bucketGroup: false + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Reduce Output Operator + sort order: + tag: -1 + value expressions: + expr: _col0 + type: bigint + expr: _col1 + type: bigint + expr: _col2 + type: bigint + expr: _col3 + type: bigint + expr: _col4 + type: bigint + expr: _col5 + type: bigint + expr: _col6 + type: int + expr: _col7 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(VALUE._col0) + expr: count(VALUE._col1) + expr: count(VALUE._col2) + expr: count(VALUE._col3) + expr: count(VALUE._col4) + expr: count(VALUE._col5) + expr: max(VALUE._col6) + expr: min(VALUE._col7) + bucketGroup: false + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6, _col7 + Select Operator + expressions: + expr: _col0 + type: bigint + expr: _col1 + type: bigint + expr: _col2 + type: bigint + expr: _col3 + type: bigint + expr: _col4 + type: bigint + expr: _col5 + type: bigint + expr: _col6 + type: int + expr: _col7 + type: bigint + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: explain +select count(*), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b) from stats_tbl_part +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b) from stats_tbl_part +POSTHOOK: type: QUERY +POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl.d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl.s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part 
PARTITION(dt=2010).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME stats_tbl_part))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTIONSTAR count)) (TOK_SELEXPR (TOK_FUNCTION count 1)) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL s))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL bo))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL bin))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL si))) (TOK_SELEXPR (TOK_FUNCTION max (TOK_TABLE_OR_COL i))) 
(TOK_SELEXPR (TOK_FUNCTION min (TOK_TABLE_OR_COL b)))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + stats_tbl_part + TableScan + alias: stats_tbl_part + Select Operator + expressions: + expr: s + type: string + expr: bo + type: boolean + expr: bin + type: binary + expr: si + type: smallint + expr: i + type: int + expr: b + type: bigint + outputColumnNames: s, bo, bin, si, i, b + Group By Operator + aggregations: + expr: count() + expr: count(1) + expr: count(s) + expr: count(bo) + expr: count(bin) + expr: count(si) + expr: max(i) + expr: min(b) + bucketGroup: false + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Reduce Output Operator + sort order: + tag: -1 + value expressions: + expr: _col0 + type: bigint + expr: _col1 + type: bigint + expr: _col2 + type: bigint + expr: _col3 + type: bigint + expr: _col4 + type: bigint + expr: _col5 + type: bigint + expr: _col6 + type: int + expr: _col7 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(VALUE._col0) + expr: count(VALUE._col1) + expr: count(VALUE._col2) + expr: count(VALUE._col3) + expr: count(VALUE._col4) + expr: count(VALUE._col5) + expr: max(VALUE._col6) + expr: min(VALUE._col7) + bucketGroup: false + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Select Operator + expressions: + expr: _col0 + type: bigint + expr: _col1 + type: bigint + expr: _col2 + type: bigint + expr: _col3 + type: bigint + expr: _col4 + type: bigint + expr: _col5 + type: bigint + expr: _col6 + type: int + expr: _col7 + type: bigint + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: analyze table stats_tbl compute statistics for columns t,si,i,b,f,d,bo,s,bin +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_tbl +#### A masked pattern was here #### +POSTHOOK: query: analyze table stats_tbl compute statistics for columns t,si,i,b,f,d,bo,s,bin +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_tbl +#### A masked pattern was here #### +POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl.d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl.s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k)over10k.FieldSchema(name:t, 
type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).d SIMPLE 
[(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +PREHOOK: query: analyze table stats_tbl_part partition(dt='2010') compute statistics for columns t,si,i,b,f,d,bo,s,bin +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_tbl_part +PREHOOK: Input: default@stats_tbl_part@dt=2010 +#### A masked pattern was here #### +POSTHOOK: query: analyze table stats_tbl_part partition(dt='2010') compute statistics for columns t,si,i,b,f,d,bo,s,bin +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_tbl_part +POSTHOOK: Input: default@stats_tbl_part@dt=2010 +#### A masked pattern was here #### +POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl.d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl.s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: 
Lineage: stats_tbl_part PARTITION(dt=2010).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, 
type:timestamp, comment:null), ] +PREHOOK: query: analyze table stats_tbl_part partition(dt='2011') compute statistics for columns t,si,i,b,f,d,bo,s,bin +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_tbl_part +PREHOOK: Input: default@stats_tbl_part@dt=2011 +#### A masked pattern was here #### +POSTHOOK: query: analyze table stats_tbl_part partition(dt='2011') compute statistics for columns t,si,i,b,f,d,bo,s,bin +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_tbl_part +POSTHOOK: Input: default@stats_tbl_part@dt=2011 +#### A masked pattern was here #### +POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl.d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl.s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part 
PARTITION(dt=2011).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +PREHOOK: query: analyze table stats_tbl_part partition(dt='2012') compute statistics for columns t,si,i,b,f,d,bo,s,bin +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_tbl_part +PREHOOK: Input: default@stats_tbl_part@dt=2012 +#### A masked pattern was here #### +POSTHOOK: query: analyze table stats_tbl_part partition(dt='2012') compute statistics for columns t,si,i,b,f,d,bo,s,bin +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_tbl_part +POSTHOOK: Input: default@stats_tbl_part@dt=2012 +#### A masked pattern was here #### +POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl.d SIMPLE 
[(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl.s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).t 
SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +PREHOOK: query: explain +select count(*), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl +POSTHOOK: type: QUERY +POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl.d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl.s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE 
[(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: 
stats_tbl_part PARTITION(dt=2012).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME stats_tbl))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTIONSTAR count)) (TOK_SELEXPR (TOK_FUNCTION count 1)) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL s))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL bo))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL bin))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL si))) (TOK_SELEXPR (TOK_FUNCTION max (TOK_TABLE_OR_COL i))) (TOK_SELEXPR (TOK_FUNCTION min (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTION max (TOK_TABLE_OR_COL f))) (TOK_SELEXPR (TOK_FUNCTION min (TOK_TABLE_OR_COL d)))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + stats_tbl + TableScan + alias: stats_tbl + Select Operator + expressions: + expr: s + type: string + expr: bo + type: boolean + expr: bin + type: binary + expr: si + type: smallint + expr: i + type: int + expr: b + type: bigint + expr: f + type: float + expr: d + type: double + outputColumnNames: s, bo, bin, si, i, b, f, d + Group By Operator + aggregations: + expr: count() + expr: count(1) + expr: count(s) + expr: count(bo) + expr: count(bin) + expr: count(si) + expr: max(i) + expr: min(b) + expr: max(f) + expr: min(d) + bucketGroup: false + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 + Reduce Output Operator + sort order: + tag: -1 + value expressions: + expr: _col0 + type: bigint + expr: _col1 + type: bigint + expr: _col2 + type: bigint + expr: _col3 + type: bigint + expr: _col4 + type: bigint + expr: _col5 + type: bigint + expr: _col6 + type: int + expr: _col7 + type: bigint + expr: _col8 + type: float + expr: _col9 + type: double + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(VALUE._col0) + expr: count(VALUE._col1) + expr: count(VALUE._col2) + expr: count(VALUE._col3) + expr: count(VALUE._col4) + expr: count(VALUE._col5) + expr: max(VALUE._col6) + expr: min(VALUE._col7) + expr: max(VALUE._col8) + expr: min(VALUE._col9) + bucketGroup: false + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 + Select Operator + expressions: + expr: _col0 + type: bigint + expr: _col1 + type: bigint + expr: _col2 + type: bigint + expr: _col3 + type: bigint + expr: _col4 + type: bigint + expr: _col5 + type: bigint + expr: _col6 + type: int + expr: _col7 + type: bigint + expr: _col8 + type: float + expr: _col9 + type: double + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select count(*), 
count(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_tbl +#### A masked pattern was here #### +POSTHOOK: query: select count(*), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_tbl +#### A masked pattern was here #### +POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl.d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl.s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).d SIMPLE 
[(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +9999 9999 9999 9999 9999 9999 65791 4294967296 99.98 0.01 +PREHOOK: query: explain +select count(*), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part +POSTHOOK: type: QUERY +POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl.d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: 
stats_tbl.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl.s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).b SIMPLE [(over10k)over10k.FieldSchema(name:b, 
type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME stats_tbl_part))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTIONSTAR count)) (TOK_SELEXPR (TOK_FUNCTION count 1)) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL s))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL bo))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL bin))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL si))) (TOK_SELEXPR (TOK_FUNCTION max (TOK_TABLE_OR_COL i))) (TOK_SELEXPR (TOK_FUNCTION min (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTION max (TOK_TABLE_OR_COL f))) (TOK_SELEXPR (TOK_FUNCTION min (TOK_TABLE_OR_COL d)))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + stats_tbl_part + TableScan + alias: stats_tbl_part + Select Operator + expressions: + expr: s + type: string + expr: bo + type: boolean + expr: bin + type: binary + expr: si + type: smallint + expr: i + type: int + expr: b + type: bigint + expr: f + type: float + expr: d + type: double + outputColumnNames: s, bo, bin, si, i, b, f, d + Group By Operator + aggregations: + expr: count() + expr: count(1) + expr: count(s) + expr: count(bo) + expr: count(bin) + expr: count(si) + expr: max(i) + expr: min(b) + expr: max(f) + expr: min(d) + bucketGroup: false + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 + Reduce Output Operator + sort order: + tag: -1 + value expressions: + expr: _col0 + type: bigint + expr: _col1 + type: bigint + expr: _col2 + type: bigint + expr: _col3 + type: bigint + expr: _col4 + type: bigint + expr: _col5 + type: bigint + expr: _col6 + type: int + expr: _col7 + type: bigint + expr: _col8 + type: float + expr: _col9 + type: double + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(VALUE._col0) + expr: count(VALUE._col1) + expr: count(VALUE._col2) + expr: count(VALUE._col3) + expr: count(VALUE._col4) + expr: count(VALUE._col5) + expr: max(VALUE._col6) + expr: min(VALUE._col7) + expr: max(VALUE._col8) + expr: min(VALUE._col9) + bucketGroup: false + mode: 
mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 + Select Operator + expressions: + expr: _col0 + type: bigint + expr: _col1 + type: bigint + expr: _col2 + type: bigint + expr: _col3 + type: bigint + expr: _col4 + type: bigint + expr: _col5 + type: bigint + expr: _col6 + type: int + expr: _col7 + type: bigint + expr: _col8 + type: float + expr: _col9 + type: double + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select count(*), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part +PREHOOK: type: QUERY +PREHOOK: Input: default@stats_tbl_part +PREHOOK: Input: default@stats_tbl_part@dt=2010 +PREHOOK: Input: default@stats_tbl_part@dt=2011 +PREHOOK: Input: default@stats_tbl_part@dt=2012 +#### A masked pattern was here #### +POSTHOOK: query: select count(*), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_tbl_part +POSTHOOK: Input: default@stats_tbl_part@dt=2010 +POSTHOOK: Input: default@stats_tbl_part@dt=2011 +POSTHOOK: Input: default@stats_tbl_part@dt=2012 +#### A masked pattern was here #### +POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl.d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl.s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE 
[(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: 
stats_tbl_part PARTITION(dt=2012).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +9489 9489 9489 9489 9489 9489 65791 4294967296 99.98 0.01 +PREHOOK: query: explain select count(ts) from stats_tbl_part +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(ts) from stats_tbl_part +POSTHOOK: type: QUERY +POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl.d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl.s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: 
Lineage: stats_tbl_part PARTITION(dt=2011).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME stats_tbl_part))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL ts)))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + stats_tbl_part + TableScan + alias: stats_tbl_part + Select Operator + expressions: + expr: ts + type: timestamp + outputColumnNames: ts + Group By Operator + aggregations: + expr: count(ts) + bucketGroup: false + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + tag: -1 + value expressions: + expr: _col0 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(VALUE._col0) + bucketGroup: false + mode: mergepartial + outputColumnNames: _col0 + Select Operator + expressions: + expr: _col0 + type: bigint + outputColumnNames: _col0 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + 
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: drop table stats_tbl +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@stats_tbl +PREHOOK: Output: default@stats_tbl +POSTHOOK: query: drop table stats_tbl +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@stats_tbl +POSTHOOK: Output: default@stats_tbl +POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl.d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl.s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).d SIMPLE [(over10k)over10k.FieldSchema(name:d, 
type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +PREHOOK: query: drop table stats_tbl_part +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@stats_tbl_part +PREHOOK: Output: default@stats_tbl_part +POSTHOOK: query: drop table stats_tbl_part +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@stats_tbl_part +POSTHOOK: Output: default@stats_tbl_part +POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl.d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl.s SIMPLE 
[(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bin SIMPLE 
[(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] Index: ql/src/test/results/clientpositive/tez/mrr.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/mrr.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/mrr.q.out (working copy) @@ -0,0 +1,2440 @@ +PREHOOK: query: -- simple query with multiple reduce stages +EXPLAIN SELECT key, count(value) as cnt FROM src GROUP BY key ORDER BY cnt +PREHOOK: type: QUERY +POSTHOOK: query: -- simple query with multiple reduce stages +EXPLAIN SELECT key, count(value) as cnt FROM src GROUP BY key ORDER BY cnt +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL value)) cnt)) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL cnt))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: key, value + Group By Operator + aggregations: + expr: count(value) + bucketGroup: false + keys: + expr: key + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col1 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col1 + type: bigint + sort order: + + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output 
format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: SELECT key, count(value) as cnt FROM src GROUP BY key ORDER BY cnt +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT key, count(value) as cnt FROM src GROUP BY key ORDER BY cnt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +490 1 +287 1 +286 1 +285 1 +284 1 +283 1 +114 1 +487 1 +485 1 +28 1 +484 1 +181 1 +275 1 +274 1 +183 1 +483 1 +27 1 +266 1 +482 1 +263 1 +262 1 +260 1 +481 1 +258 1 +257 1 +116 1 +479 1 +252 1 +249 1 +248 1 +247 1 +244 1 +92 1 +241 1 +477 1 +475 1 +472 1 +470 1 +235 1 +47 1 +186 1 +126 1 +228 1 +226 1 +131 1 +467 1 +222 1 +133 1 +82 1 +218 1 +80 1 +460 1 +214 1 +8 1 +78 1 +189 1 +457 1 +455 1 +136 1 +202 1 +201 1 +453 1 +20 1 +2 1 +19 1 +452 1 +196 1 +449 1 +194 1 +190 1 +192 1 +448 1 +446 1 +444 1 +443 1 +44 1 +77 1 +143 1 +437 1 +436 1 +435 1 +432 1 +145 1 +150 1 +43 1 +10 1 +427 1 +74 1 +421 1 +9 1 +419 1 +418 1 +153 1 +105 1 +69 1 +411 1 +41 1 +155 1 +407 1 +156 1 +87 1 +157 1 +402 1 +158 1 +400 1 +4 1 +66 1 +65 1 +160 1 +64 1 +394 1 +393 1 +392 1 +389 1 +386 1 +162 1 +86 1 +379 1 +378 1 +377 1 +375 1 +374 1 +373 1 +57 1 +163 1 +368 1 +54 1 +366 1 +365 1 +364 1 +362 1 +360 1 +356 1 +53 1 +351 1 +166 1 +168 1 +345 1 +85 1 +11 1 +341 1 +34 1 +339 1 +338 1 +336 1 +335 1 +111 1 +332 1 +497 1 +33 1 +17 1 +496 1 +323 1 +495 1 +494 1 +170 1 +493 1 +177 1 +315 1 +178 1 +310 1 +96 1 +308 1 +491 1 +306 1 +305 1 +302 1 +30 1 +180 1 +296 1 +292 1 +291 1 +289 1 +98 2 +97 2 +95 2 +84 2 +83 2 +76 2 +72 2 +67 2 +58 2 +51 2 +492 2 +478 2 +463 2 +462 2 +459 2 +458 2 +439 2 +429 2 +424 2 +42 2 +414 2 +413 2 +404 2 +399 2 +397 2 +395 2 +382 2 +37 2 +367 2 +353 2 +344 2 +342 2 +333 2 +331 2 +325 2 +322 2 +321 2 +317 2 +309 2 +307 2 +288 2 +282 2 +281 2 +280 2 +278 2 +272 2 +265 2 +26 2 +256 2 +255 2 +242 2 +24 2 +239 2 +238 2 +237 2 +233 2 +229 2 +224 2 +223 2 +221 2 +219 2 +217 2 +216 2 +213 2 +209 2 +207 2 +205 2 +203 2 +200 2 +197 2 +195 2 +191 2 +18 2 +179 2 +176 2 +175 2 +174 2 +172 2 +165 2 +164 2 +152 2 +15 2 +149 2 +146 2 +137 2 +134 2 +129 2 +125 2 +120 2 +12 2 +118 2 +113 2 +104 2 +103 2 +100 2 +498 3 +369 3 +384 3 +396 3 +403 3 +409 3 +417 3 +5 3 +430 3 +70 3 +119 3 +0 3 +431 3 +438 3 +480 3 +193 3 +199 3 +208 3 +187 3 +273 3 +298 3 +454 3 +311 3 +316 3 +466 3 +90 3 +128 3 +318 3 +327 3 +167 3 +35 3 +468 4 +489 4 +406 4 +169 4 +138 4 +277 4 +469 5 +401 5 +230 5 +348 5 +PREHOOK: query: -- join query with multiple reduce stages; +EXPLAIN SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt +PREHOOK: type: QUERY +POSTHOOK: query: -- join query with multiple reduce stages; +EXPLAIN SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) s1) (TOK_TABREF (TOK_TABNAME src) s2) (= (. (TOK_TABLE_OR_COL s1) key) (. (TOK_TABLE_OR_COL s2) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL s2) key)) (TOK_SELEXPR (TOK_FUNCTIONDI count (. (TOK_TABLE_OR_COL s2) value)) cnt)) (TOK_GROUPBY (. 
(TOK_TABLE_OR_COL s2) key)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL cnt))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + s2 + TableScan + alias: s2 + Reduce Output Operator + key expressions: + expr: key + type: string + sort order: + + Map-reduce partition columns: + expr: key + type: string + tag: 1 + value expressions: + expr: key + type: string + expr: value + type: string + Alias -> Map Operator Tree: + s1 + TableScan + alias: s1 + Reduce Output Operator + key expressions: + expr: key + type: string + sort order: + + Map-reduce partition columns: + expr: key + type: string + tag: 0 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 {VALUE._col0} {VALUE._col1} + handleSkewJoin: false + outputColumnNames: _col4, _col5 + Select Operator + expressions: + expr: _col4 + type: string + expr: _col5 + type: string + outputColumnNames: _col4, _col5 + Group By Operator + aggregations: + expr: count(DISTINCT _col5) + bucketGroup: false + keys: + expr: _col4 + type: string + expr: _col5 + type: string + mode: hash + outputColumnNames: _col0, _col1, _col2 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + expr: _col1 + type: string + sort order: ++ + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col2 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(DISTINCT KEY._col1:0._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col1 + type: bigint + sort order: + + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +98 1 +97 1 +96 1 +95 1 +92 1 +90 1 +9 1 +87 1 +86 1 +85 1 +84 1 +83 1 +82 1 +80 1 +8 1 +78 1 +77 1 +76 1 +74 1 +72 1 +70 1 +69 1 +67 1 +66 1 +65 1 +64 1 +58 1 +57 1 +54 1 +53 1 +51 1 +5 1 +498 1 +497 1 +496 1 +495 1 +494 1 +493 1 +492 1 +491 1 +490 1 +489 1 +487 1 +485 1 +484 1 +483 1 +482 1 +481 1 +480 1 +479 1 +478 1 +477 1 +475 1 +472 1 +470 1 +47 1 +469 1 +468 1 +467 1 +466 1 +463 1 +462 1 +460 1 +459 1 +458 1 +457 1 +455 1 +454 1 +453 1 +452 1 +449 1 +448 1 +446 1 +444 1 +443 1 +44 1 +439 1 +438 1 +437 1 +436 1 +435 1 +432 1 +431 1 +430 1 +43 1 +429 1 +427 1 +424 1 +421 1 +42 1 +419 1 +418 1 +417 1 +414 1 +413 1 +411 1 +41 1 +409 1 +407 1 +406 1 +404 1 +403 1 +402 1 +401 1 +400 1 +4 1 +399 1 +397 1 +396 1 +395 1 +394 1 +393 1 +392 1 
+389 1 +386 1 +384 1 +382 1 +379 1 +378 1 +377 1 +375 1 +374 1 +373 1 +37 1 +369 1 +368 1 +367 1 +366 1 +365 1 +364 1 +362 1 +360 1 +356 1 +353 1 +351 1 +35 1 +348 1 +345 1 +344 1 +342 1 +341 1 +34 1 +339 1 +338 1 +336 1 +335 1 +333 1 +332 1 +331 1 +33 1 +327 1 +325 1 +323 1 +322 1 +321 1 +318 1 +317 1 +316 1 +315 1 +311 1 +310 1 +309 1 +308 1 +307 1 +306 1 +305 1 +302 1 +30 1 +298 1 +296 1 +292 1 +291 1 +289 1 +288 1 +287 1 +286 1 +285 1 +284 1 +283 1 +282 1 +281 1 +280 1 +28 1 +278 1 +277 1 +275 1 +274 1 +273 1 +272 1 +27 1 +266 1 +265 1 +263 1 +262 1 +260 1 +26 1 +258 1 +257 1 +256 1 +255 1 +252 1 +249 1 +248 1 +247 1 +244 1 +242 1 +241 1 +24 1 +239 1 +238 1 +237 1 +235 1 +233 1 +230 1 +229 1 +228 1 +226 1 +224 1 +223 1 +222 1 +221 1 +219 1 +218 1 +217 1 +216 1 +214 1 +213 1 +209 1 +208 1 +207 1 +205 1 +203 1 +202 1 +201 1 +200 1 +20 1 +2 1 +199 1 +197 1 +196 1 +195 1 +194 1 +193 1 +192 1 +191 1 +190 1 +19 1 +189 1 +187 1 +186 1 +183 1 +181 1 +180 1 +18 1 +179 1 +178 1 +177 1 +176 1 +175 1 +174 1 +172 1 +170 1 +17 1 +169 1 +168 1 +167 1 +166 1 +165 1 +164 1 +163 1 +162 1 +160 1 +158 1 +157 1 +156 1 +155 1 +153 1 +152 1 +150 1 +15 1 +149 1 +146 1 +145 1 +143 1 +138 1 +137 1 +136 1 +134 1 +133 1 +131 1 +129 1 +128 1 +126 1 +125 1 +120 1 +12 1 +119 1 +118 1 +116 1 +114 1 +113 1 +111 1 +11 1 +105 1 +104 1 +103 1 +100 1 +10 1 +0 1 +PREHOOK: query: -- same query with broadcast join +EXPLAIN SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt +PREHOOK: type: QUERY +POSTHOOK: query: -- same query with broadcast join +EXPLAIN SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) s1) (TOK_TABREF (TOK_TABNAME src) s2) (= (. (TOK_TABLE_OR_COL s1) key) (. (TOK_TABLE_OR_COL s2) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL s2) key)) (TOK_SELEXPR (TOK_FUNCTIONDI count (. (TOK_TABLE_OR_COL s2) value)) cnt)) (TOK_GROUPBY (. 
(TOK_TABLE_OR_COL s2) key)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL cnt))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + s2 + TableScan + alias: s2 + Reduce Output Operator + key expressions: + expr: key + type: string + sort order: + + Map-reduce partition columns: + expr: key + type: string + tag: 1 + value expressions: + expr: key + type: string + expr: value + type: string + Alias -> Map Operator Tree: + s1 + TableScan + alias: s1 + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 {key} {value} + handleSkewJoin: false + keys: + 0 [Column[key]] + 1 [Column[key]] + outputColumnNames: _col4, _col5 + Position of Big Table: 0 + Select Operator + expressions: + expr: _col4 + type: string + expr: _col5 + type: string + outputColumnNames: _col4, _col5 + Group By Operator + aggregations: + expr: count(DISTINCT _col5) + bucketGroup: false + keys: + expr: _col4 + type: string + expr: _col5 + type: string + mode: hash + outputColumnNames: _col0, _col1, _col2 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + expr: _col1 + type: string + sort order: ++ + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col2 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(DISTINCT KEY._col1:0._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col1 + type: bigint + sort order: + + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +98 1 +97 1 +96 1 +95 1 +92 1 +90 1 +9 1 +87 1 +86 1 +85 1 +84 1 +83 1 +82 1 +80 1 +8 1 +78 1 +77 1 +76 1 +74 1 +72 1 +70 1 +69 1 +67 1 +66 1 +65 1 +64 1 +58 1 +57 1 +54 1 +53 1 +51 1 +5 1 +498 1 +497 1 +496 1 +495 1 +494 1 +493 1 +492 1 +491 1 +490 1 +489 1 +487 1 +485 1 +484 1 +483 1 +482 1 +481 1 +480 1 +479 1 +478 1 +477 1 +475 1 +472 1 +470 1 +47 1 +469 1 +468 1 +467 1 +466 1 +463 1 +462 1 +460 1 +459 1 +458 1 +457 1 +455 1 +454 1 +453 1 +452 1 +449 1 +448 1 +446 1 +444 1 +443 1 +44 1 +439 1 +438 1 +437 1 +436 1 +435 1 +432 1 +431 1 +430 1 +43 1 +429 1 +427 1 +424 1 +421 1 +42 1 +419 1 +418 1 +417 1 +414 1 +413 1 +411 1 +41 1 +409 1 +407 1 +406 1 +404 1 +403 1 +402 1 +401 1 +400 1 +4 1 +399 1 +397 1 +396 1 +395 1 +394 1 +393 1 +392 1 +389 1 +386 1 +384 1 +382 1 +379 1 +378 1 +377 1 +375 1 +374 1 +373 1 +37 1 +369 1 +368 1 +367 1 +366 1 +365 1 +364 1 
+362 1 +360 1 +356 1 +353 1 +351 1 +35 1 +348 1 +345 1 +344 1 +342 1 +341 1 +34 1 +339 1 +338 1 +336 1 +335 1 +333 1 +332 1 +331 1 +33 1 +327 1 +325 1 +323 1 +322 1 +321 1 +318 1 +317 1 +316 1 +315 1 +311 1 +310 1 +309 1 +308 1 +307 1 +306 1 +305 1 +302 1 +30 1 +298 1 +296 1 +292 1 +291 1 +289 1 +288 1 +287 1 +286 1 +285 1 +284 1 +283 1 +282 1 +281 1 +280 1 +28 1 +278 1 +277 1 +275 1 +274 1 +273 1 +272 1 +27 1 +266 1 +265 1 +263 1 +262 1 +260 1 +26 1 +258 1 +257 1 +256 1 +255 1 +252 1 +249 1 +248 1 +247 1 +244 1 +242 1 +241 1 +24 1 +239 1 +238 1 +237 1 +235 1 +233 1 +230 1 +229 1 +228 1 +226 1 +224 1 +223 1 +222 1 +221 1 +219 1 +218 1 +217 1 +216 1 +214 1 +213 1 +209 1 +208 1 +207 1 +205 1 +203 1 +202 1 +201 1 +200 1 +20 1 +2 1 +199 1 +197 1 +196 1 +195 1 +194 1 +193 1 +192 1 +191 1 +190 1 +19 1 +189 1 +187 1 +186 1 +183 1 +181 1 +180 1 +18 1 +179 1 +178 1 +177 1 +176 1 +175 1 +174 1 +172 1 +170 1 +17 1 +169 1 +168 1 +167 1 +166 1 +165 1 +164 1 +163 1 +162 1 +160 1 +158 1 +157 1 +156 1 +155 1 +153 1 +152 1 +150 1 +15 1 +149 1 +146 1 +145 1 +143 1 +138 1 +137 1 +136 1 +134 1 +133 1 +131 1 +129 1 +128 1 +126 1 +125 1 +120 1 +12 1 +119 1 +118 1 +116 1 +114 1 +113 1 +111 1 +11 1 +105 1 +104 1 +103 1 +100 1 +10 1 +0 1 +PREHOOK: query: -- query with multiple branches in the task dag +EXPLAIN +SELECT * +FROM + (SELECT key, count(value) as cnt + FROM src GROUP BY key ORDER BY cnt) s1 + JOIN + (SELECT key, count(value) as cnt + FROM src GROUP BY key ORDER BY cnt) s2 + JOIN + (SELECT key, count(value) as cnt + FROM src GROUP BY key ORDER BY cnt) s3 + ON (s1.key = s2.key and s1.key = s3.key) +WHERE + s1.cnt > 1 +ORDER BY s1.key +PREHOOK: type: QUERY +POSTHOOK: query: -- query with multiple branches in the task dag +EXPLAIN +SELECT * +FROM + (SELECT key, count(value) as cnt + FROM src GROUP BY key ORDER BY cnt) s1 + JOIN + (SELECT key, count(value) as cnt + FROM src GROUP BY key ORDER BY cnt) s2 + JOIN + (SELECT key, count(value) as cnt + FROM src GROUP BY key ORDER BY cnt) s3 + ON (s1.key = s2.key and s1.key = s3.key) +WHERE + s1.cnt > 1 +ORDER BY s1.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL value)) cnt)) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL cnt))))) s1) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL value)) cnt)) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL cnt))))) s2)) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL value)) cnt)) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL cnt))))) s3) (and (= (. (TOK_TABLE_OR_COL s1) key) (. (TOK_TABLE_OR_COL s2) key)) (= (. (TOK_TABLE_OR_COL s1) key) (. (TOK_TABLE_OR_COL s3) key))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (> (. (TOK_TABLE_OR_COL s1) cnt) 1)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (. 
(TOK_TABLE_OR_COL s1) key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: key, value + Group By Operator + aggregations: + expr: count(value) + bucketGroup: false + keys: + expr: key + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col1 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col1 + type: bigint + sort order: + + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + Reduce Operator Tree: + Extract + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: 2 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: key, value + Group By Operator + aggregations: + expr: count(value) + bucketGroup: false + keys: + expr: key + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col1 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col1 + type: bigint + sort order: + + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + Reduce Operator Tree: + Extract + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: 1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: key, value + Group By Operator + aggregations: + expr: count(value) + bucketGroup: false + keys: + expr: key + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col1 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: 
_col0, _col1 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col1 + type: bigint + sort order: + + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + Reduce Operator Tree: + Extract + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: 0 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + Inner Join 0 to 2 + condition expressions: + 0 {VALUE._col0} {VALUE._col1} + 1 {VALUE._col0} {VALUE._col1} + 2 {VALUE._col0} {VALUE._col1} + handleSkewJoin: false + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Filter Operator + predicate: + expr: (_col1 > 1) + type: boolean + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + expr: _col2 + type: string + expr: _col3 + type: bigint + expr: _col4 + type: string + expr: _col5 + type: bigint + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + expr: _col2 + type: string + expr: _col3 + type: bigint + expr: _col4 + type: string + expr: _col5 + type: bigint + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: SELECT * +FROM + (SELECT key, count(value) as cnt + FROM src GROUP BY key ORDER BY cnt) s1 + JOIN + (SELECT key, count(value) as cnt + FROM src GROUP BY key ORDER BY cnt) s2 + JOIN + (SELECT key, count(value) as cnt + FROM src GROUP BY key ORDER BY cnt) s3 + ON (s1.key = s2.key and s1.key = s3.key) +WHERE + s1.cnt > 1 +ORDER BY s1.key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT * +FROM + (SELECT key, count(value) as cnt + FROM src GROUP BY key ORDER BY cnt) s1 + JOIN + (SELECT key, count(value) as cnt + FROM src GROUP BY key ORDER BY cnt) s2 + JOIN + (SELECT key, count(value) as cnt + FROM src GROUP BY key ORDER BY cnt) s3 + ON (s1.key = s2.key and s1.key = s3.key) +WHERE + s1.cnt > 1 +ORDER BY s1.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 3 0 3 0 3 +100 2 100 2 100 2 +103 2 103 2 103 2 +104 2 104 2 104 2 +113 2 113 2 113 2 +118 2 118 2 118 2 +119 3 119 3 119 3 +12 2 12 2 12 2 +120 2 120 2 120 2 +125 2 125 2 125 2 +128 3 128 3 128 3 +129 2 129 2 129 2 +134 2 134 2 134 2 +137 2 137 2 137 2 +138 4 138 4 138 4 +146 2 146 2 146 2 +149 2 149 2 149 2 +15 2 15 2 15 2 +152 2 152 2 152 2 +164 2 164 2 164 2 +165 2 165 2 165 2 +167 3 167 3 167 3 +169 4 169 4 169 4 +172 2 172 2 172 2 +174 2 174 2 174 2 +175 2 175 2 175 2 +176 2 176 2 176 2 +179 2 179 2 179 2 +18 2 18 2 18 2 +187 3 187 3 187 3 +191 2 191 2 191 2 +193 3 193 3 193 3 +195 2 195 2 195 2 +197 2 197 2 197 2 +199 3 199 3 199 3 +200 2 200 2 200 2 +203 2 203 2 203 2 +205 2 205 2 205 2 +207 2 207 2 207 2 +208 3 208 3 208 3 +209 2 209 2 209 2 +213 2 213 2 213 2 +216 2 216 2 
216 2 +217 2 217 2 217 2 +219 2 219 2 219 2 +221 2 221 2 221 2 +223 2 223 2 223 2 +224 2 224 2 224 2 +229 2 229 2 229 2 +230 5 230 5 230 5 +233 2 233 2 233 2 +237 2 237 2 237 2 +238 2 238 2 238 2 +239 2 239 2 239 2 +24 2 24 2 24 2 +242 2 242 2 242 2 +255 2 255 2 255 2 +256 2 256 2 256 2 +26 2 26 2 26 2 +265 2 265 2 265 2 +272 2 272 2 272 2 +273 3 273 3 273 3 +277 4 277 4 277 4 +278 2 278 2 278 2 +280 2 280 2 280 2 +281 2 281 2 281 2 +282 2 282 2 282 2 +288 2 288 2 288 2 +298 3 298 3 298 3 +307 2 307 2 307 2 +309 2 309 2 309 2 +311 3 311 3 311 3 +316 3 316 3 316 3 +317 2 317 2 317 2 +318 3 318 3 318 3 +321 2 321 2 321 2 +322 2 322 2 322 2 +325 2 325 2 325 2 +327 3 327 3 327 3 +331 2 331 2 331 2 +333 2 333 2 333 2 +342 2 342 2 342 2 +344 2 344 2 344 2 +348 5 348 5 348 5 +35 3 35 3 35 3 +353 2 353 2 353 2 +367 2 367 2 367 2 +369 3 369 3 369 3 +37 2 37 2 37 2 +382 2 382 2 382 2 +384 3 384 3 384 3 +395 2 395 2 395 2 +396 3 396 3 396 3 +397 2 397 2 397 2 +399 2 399 2 399 2 +401 5 401 5 401 5 +403 3 403 3 403 3 +404 2 404 2 404 2 +406 4 406 4 406 4 +409 3 409 3 409 3 +413 2 413 2 413 2 +414 2 414 2 414 2 +417 3 417 3 417 3 +42 2 42 2 42 2 +424 2 424 2 424 2 +429 2 429 2 429 2 +430 3 430 3 430 3 +431 3 431 3 431 3 +438 3 438 3 438 3 +439 2 439 2 439 2 +454 3 454 3 454 3 +458 2 458 2 458 2 +459 2 459 2 459 2 +462 2 462 2 462 2 +463 2 463 2 463 2 +466 3 466 3 466 3 +468 4 468 4 468 4 +469 5 469 5 469 5 +478 2 478 2 478 2 +480 3 480 3 480 3 +489 4 489 4 489 4 +492 2 492 2 492 2 +498 3 498 3 498 3 +5 3 5 3 5 3 +51 2 51 2 51 2 +58 2 58 2 58 2 +67 2 67 2 67 2 +70 3 70 3 70 3 +72 2 72 2 72 2 +76 2 76 2 76 2 +83 2 83 2 83 2 +84 2 84 2 84 2 +90 3 90 3 90 3 +95 2 95 2 95 2 +97 2 97 2 97 2 +98 2 98 2 98 2 +PREHOOK: query: -- query with broadcast join in the reduce stage +EXPLAIN +SELECT * +FROM + (SELECT key, count(value) as cnt FROM src GROUP BY key) s1 + JOIN src ON (s1.key = src.key) +PREHOOK: type: QUERY +POSTHOOK: query: -- query with broadcast join in the reduce stage +EXPLAIN +SELECT * +FROM + (SELECT key, count(value) as cnt FROM src GROUP BY key) s1 + JOIN src ON (s1.key = src.key) +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL value)) cnt)) (TOK_GROUPBY (TOK_TABLE_OR_COL key)))) s1) (TOK_TABREF (TOK_TABNAME src)) (= (. (TOK_TABLE_OR_COL s1) key) (. 
(TOK_TABLE_OR_COL src) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: key, value + Group By Operator + aggregations: + expr: count(value) + bucketGroup: false + keys: + expr: key + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col1 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: 0 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + Alias -> Map Operator Tree: + src + TableScan + alias: src + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {_col0} {_col1} + 1 {key} {value} + handleSkewJoin: false + keys: + 0 [Column[_col0]] + 1 [Column[key]] + outputColumnNames: _col0, _col1, _col2, _col3 + Position of Big Table: 1 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + expr: _col2 + type: string + expr: _col3 + type: string + outputColumnNames: _col0, _col1, _col2, _col3 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: SELECT * +FROM + (SELECT key, count(value) as cnt FROM src GROUP BY key) s1 + JOIN src ON (s1.key = src.key) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT * +FROM + (SELECT key, count(value) as cnt FROM src GROUP BY key) s1 + JOIN src ON (s1.key = src.key) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +238 2 238 val_238 +86 1 86 val_86 +311 3 311 val_311 +27 1 27 val_27 +165 2 165 val_165 +409 3 409 val_409 +255 2 255 val_255 +278 2 278 val_278 +98 2 98 val_98 +484 1 484 val_484 +265 2 265 val_265 +193 3 193 val_193 +401 5 401 val_401 +150 1 150 val_150 +273 3 273 val_273 +224 2 224 val_224 +369 3 369 val_369 +66 1 66 val_66 +128 3 128 val_128 +213 2 213 val_213 +146 2 146 val_146 +406 4 406 val_406 +429 2 429 val_429 +374 1 374 val_374 +152 2 152 val_152 +469 5 469 val_469 +145 1 145 val_145 +495 1 495 val_495 +37 2 37 val_37 +327 3 327 val_327 +281 2 281 val_281 +277 4 277 val_277 +209 2 209 val_209 +15 2 15 val_15 +82 1 82 val_82 +403 3 403 val_403 +166 1 166 val_166 +417 3 417 val_417 +430 3 430 val_430 +252 1 252 val_252 +292 1 292 val_292 +219 2 219 val_219 +287 1 287 val_287 +153 1 153 val_153 +193 3 193 val_193 +338 1 338 val_338 +446 1 446 val_446 +459 2 459 val_459 +394 1 394 val_394 +237 2 237 
val_237 +482 1 482 val_482 +174 2 174 val_174 +413 2 413 val_413 +494 1 494 val_494 +207 2 207 val_207 +199 3 199 val_199 +466 3 466 val_466 +208 3 208 val_208 +174 2 174 val_174 +399 2 399 val_399 +396 3 396 val_396 +247 1 247 val_247 +417 3 417 val_417 +489 4 489 val_489 +162 1 162 val_162 +377 1 377 val_377 +397 2 397 val_397 +309 2 309 val_309 +365 1 365 val_365 +266 1 266 val_266 +439 2 439 val_439 +342 2 342 val_342 +367 2 367 val_367 +325 2 325 val_325 +167 3 167 val_167 +195 2 195 val_195 +475 1 475 val_475 +17 1 17 val_17 +113 2 113 val_113 +155 1 155 val_155 +203 2 203 val_203 +339 1 339 val_339 +0 3 0 val_0 +455 1 455 val_455 +128 3 128 val_128 +311 3 311 val_311 +316 3 316 val_316 +57 1 57 val_57 +302 1 302 val_302 +205 2 205 val_205 +149 2 149 val_149 +438 3 438 val_438 +345 1 345 val_345 +129 2 129 val_129 +170 1 170 val_170 +20 1 20 val_20 +489 4 489 val_489 +157 1 157 val_157 +378 1 378 val_378 +221 2 221 val_221 +92 1 92 val_92 +111 1 111 val_111 +47 1 47 val_47 +72 2 72 val_72 +4 1 4 val_4 +280 2 280 val_280 +35 3 35 val_35 +427 1 427 val_427 +277 4 277 val_277 +208 3 208 val_208 +356 1 356 val_356 +399 2 399 val_399 +169 4 169 val_169 +382 2 382 val_382 +498 3 498 val_498 +125 2 125 val_125 +386 1 386 val_386 +437 1 437 val_437 +469 5 469 val_469 +192 1 192 val_192 +286 1 286 val_286 +187 3 187 val_187 +176 2 176 val_176 +54 1 54 val_54 +459 2 459 val_459 +51 2 51 val_51 +138 4 138 val_138 +103 2 103 val_103 +239 2 239 val_239 +213 2 213 val_213 +216 2 216 val_216 +430 3 430 val_430 +278 2 278 val_278 +176 2 176 val_176 +289 1 289 val_289 +221 2 221 val_221 +65 1 65 val_65 +318 3 318 val_318 +332 1 332 val_332 +311 3 311 val_311 +275 1 275 val_275 +137 2 137 val_137 +241 1 241 val_241 +83 2 83 val_83 +333 2 333 val_333 +180 1 180 val_180 +284 1 284 val_284 +12 2 12 val_12 +230 5 230 val_230 +181 1 181 val_181 +67 2 67 val_67 +260 1 260 val_260 +404 2 404 val_404 +384 3 384 val_384 +489 4 489 val_489 +353 2 353 val_353 +373 1 373 val_373 +272 2 272 val_272 +138 4 138 val_138 +217 2 217 val_217 +84 2 84 val_84 +348 5 348 val_348 +466 3 466 val_466 +58 2 58 val_58 +8 1 8 val_8 +411 1 411 val_411 +230 5 230 val_230 +208 3 208 val_208 +348 5 348 val_348 +24 2 24 val_24 +463 2 463 val_463 +431 3 431 val_431 +179 2 179 val_179 +172 2 172 val_172 +42 2 42 val_42 +129 2 129 val_129 +158 1 158 val_158 +119 3 119 val_119 +496 1 496 val_496 +0 3 0 val_0 +322 2 322 val_322 +197 2 197 val_197 +468 4 468 val_468 +393 1 393 val_393 +454 3 454 val_454 +100 2 100 val_100 +298 3 298 val_298 +199 3 199 val_199 +191 2 191 val_191 +418 1 418 val_418 +96 1 96 val_96 +26 2 26 val_26 +165 2 165 val_165 +327 3 327 val_327 +230 5 230 val_230 +205 2 205 val_205 +120 2 120 val_120 +131 1 131 val_131 +51 2 51 val_51 +404 2 404 val_404 +43 1 43 val_43 +436 1 436 val_436 +156 1 156 val_156 +469 5 469 val_469 +468 4 468 val_468 +308 1 308 val_308 +95 2 95 val_95 +196 1 196 val_196 +288 2 288 val_288 +481 1 481 val_481 +457 1 457 val_457 +98 2 98 val_98 +282 2 282 val_282 +197 2 197 val_197 +187 3 187 val_187 +318 3 318 val_318 +318 3 318 val_318 +409 3 409 val_409 +470 1 470 val_470 +137 2 137 val_137 +369 3 369 val_369 +316 3 316 val_316 +169 4 169 val_169 +413 2 413 val_413 +85 1 85 val_85 +77 1 77 val_77 +0 3 0 val_0 +490 1 490 val_490 +87 1 87 val_87 +364 1 364 val_364 +179 2 179 val_179 +118 2 118 val_118 +134 2 134 val_134 +395 2 395 val_395 +282 2 282 val_282 +138 4 138 val_138 +238 2 238 val_238 +419 1 419 val_419 +15 2 15 val_15 +118 2 118 val_118 +72 2 72 val_72 +90 3 90 val_90 +307 2 307 
val_307 +19 1 19 val_19 +435 1 435 val_435 +10 1 10 val_10 +277 4 277 val_277 +273 3 273 val_273 +306 1 306 val_306 +224 2 224 val_224 +309 2 309 val_309 +389 1 389 val_389 +327 3 327 val_327 +242 2 242 val_242 +369 3 369 val_369 +392 1 392 val_392 +272 2 272 val_272 +331 2 331 val_331 +401 5 401 val_401 +242 2 242 val_242 +452 1 452 val_452 +177 1 177 val_177 +226 1 226 val_226 +5 3 5 val_5 +497 1 497 val_497 +402 1 402 val_402 +396 3 396 val_396 +317 2 317 val_317 +395 2 395 val_395 +58 2 58 val_58 +35 3 35 val_35 +336 1 336 val_336 +95 2 95 val_95 +11 1 11 val_11 +168 1 168 val_168 +34 1 34 val_34 +229 2 229 val_229 +233 2 233 val_233 +143 1 143 val_143 +472 1 472 val_472 +322 2 322 val_322 +498 3 498 val_498 +160 1 160 val_160 +195 2 195 val_195 +42 2 42 val_42 +321 2 321 val_321 +430 3 430 val_430 +119 3 119 val_119 +489 4 489 val_489 +458 2 458 val_458 +78 1 78 val_78 +76 2 76 val_76 +41 1 41 val_41 +223 2 223 val_223 +492 2 492 val_492 +149 2 149 val_149 +449 1 449 val_449 +218 1 218 val_218 +228 1 228 val_228 +138 4 138 val_138 +453 1 453 val_453 +30 1 30 val_30 +209 2 209 val_209 +64 1 64 val_64 +468 4 468 val_468 +76 2 76 val_76 +74 1 74 val_74 +342 2 342 val_342 +69 1 69 val_69 +230 5 230 val_230 +33 1 33 val_33 +368 1 368 val_368 +103 2 103 val_103 +296 1 296 val_296 +113 2 113 val_113 +216 2 216 val_216 +367 2 367 val_367 +344 2 344 val_344 +167 3 167 val_167 +274 1 274 val_274 +219 2 219 val_219 +239 2 239 val_239 +485 1 485 val_485 +116 1 116 val_116 +223 2 223 val_223 +256 2 256 val_256 +263 1 263 val_263 +70 3 70 val_70 +487 1 487 val_487 +480 3 480 val_480 +401 5 401 val_401 +288 2 288 val_288 +191 2 191 val_191 +5 3 5 val_5 +244 1 244 val_244 +438 3 438 val_438 +128 3 128 val_128 +467 1 467 val_467 +432 1 432 val_432 +202 1 202 val_202 +316 3 316 val_316 +229 2 229 val_229 +469 5 469 val_469 +463 2 463 val_463 +280 2 280 val_280 +2 1 2 val_2 +35 3 35 val_35 +283 1 283 val_283 +331 2 331 val_331 +235 1 235 val_235 +80 1 80 val_80 +44 1 44 val_44 +193 3 193 val_193 +321 2 321 val_321 +335 1 335 val_335 +104 2 104 val_104 +466 3 466 val_466 +366 1 366 val_366 +175 2 175 val_175 +403 3 403 val_403 +483 1 483 val_483 +53 1 53 val_53 +105 1 105 val_105 +257 1 257 val_257 +406 4 406 val_406 +409 3 409 val_409 +190 1 190 val_190 +406 4 406 val_406 +401 5 401 val_401 +114 1 114 val_114 +258 1 258 val_258 +90 3 90 val_90 +203 2 203 val_203 +262 1 262 val_262 +348 5 348 val_348 +424 2 424 val_424 +12 2 12 val_12 +396 3 396 val_396 +201 1 201 val_201 +217 2 217 val_217 +164 2 164 val_164 +431 3 431 val_431 +454 3 454 val_454 +478 2 478 val_478 +298 3 298 val_298 +125 2 125 val_125 +431 3 431 val_431 +164 2 164 val_164 +424 2 424 val_424 +187 3 187 val_187 +382 2 382 val_382 +5 3 5 val_5 +70 3 70 val_70 +397 2 397 val_397 +480 3 480 val_480 +291 1 291 val_291 +24 2 24 val_24 +351 1 351 val_351 +255 2 255 val_255 +104 2 104 val_104 +70 3 70 val_70 +163 1 163 val_163 +438 3 438 val_438 +119 3 119 val_119 +414 2 414 val_414 +200 2 200 val_200 +491 1 491 val_491 +237 2 237 val_237 +439 2 439 val_439 +360 1 360 val_360 +248 1 248 val_248 +479 1 479 val_479 +305 1 305 val_305 +417 3 417 val_417 +199 3 199 val_199 +444 1 444 val_444 +120 2 120 val_120 +429 2 429 val_429 +169 4 169 val_169 +443 1 443 val_443 +323 1 323 val_323 +325 2 325 val_325 +277 4 277 val_277 +230 5 230 val_230 +478 2 478 val_478 +178 1 178 val_178 +468 4 468 val_468 +310 1 310 val_310 +317 2 317 val_317 +333 2 333 val_333 +493 1 493 val_493 +460 1 460 val_460 +207 2 207 val_207 +249 1 249 val_249 +265 2 265 val_265 
+480 3 480 val_480 +83 2 83 val_83 +136 1 136 val_136 +353 2 353 val_353 +172 2 172 val_172 +214 1 214 val_214 +462 2 462 val_462 +233 2 233 val_233 +406 4 406 val_406 +133 1 133 val_133 +175 2 175 val_175 +189 1 189 val_189 +454 3 454 val_454 +375 1 375 val_375 +401 5 401 val_401 +421 1 421 val_421 +407 1 407 val_407 +384 3 384 val_384 +256 2 256 val_256 +26 2 26 val_26 +134 2 134 val_134 +67 2 67 val_67 +384 3 384 val_384 +379 1 379 val_379 +18 2 18 val_18 +462 2 462 val_462 +492 2 492 val_492 +100 2 100 val_100 +298 3 298 val_298 +9 1 9 val_9 +341 1 341 val_341 +498 3 498 val_498 +146 2 146 val_146 +458 2 458 val_458 +362 1 362 val_362 +186 1 186 val_186 +285 1 285 val_285 +348 5 348 val_348 +167 3 167 val_167 +18 2 18 val_18 +273 3 273 val_273 +183 1 183 val_183 +281 2 281 val_281 +344 2 344 val_344 +97 2 97 val_97 +469 5 469 val_469 +315 1 315 val_315 +84 2 84 val_84 +28 1 28 val_28 +37 2 37 val_37 +448 1 448 val_448 +152 2 152 val_152 +348 5 348 val_348 +307 2 307 val_307 +194 1 194 val_194 +414 2 414 val_414 +477 1 477 val_477 +222 1 222 val_222 +126 1 126 val_126 +90 3 90 val_90 +169 4 169 val_169 +403 3 403 val_403 +400 1 400 val_400 +200 2 200 val_200 +97 2 97 val_97 Index: ql/src/test/results/clientpositive/tez/ptf.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/ptf.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/ptf.q.out (working copy) @@ -0,0 +1,1454 @@ +PREHOOK: query: DROP TABLE part +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE part +POSTHOOK: type: DROPTABLE +PREHOOK: query: -- data setup +CREATE TABLE part( + p_partkey INT, + p_name STRING, + p_mfgr STRING, + p_brand STRING, + p_type STRING, + p_size INT, + p_container STRING, + p_retailprice DOUBLE, + p_comment STRING +) +PREHOOK: type: CREATETABLE +POSTHOOK: query: -- data setup +CREATE TABLE part( + p_partkey INT, + p_name STRING, + p_mfgr STRING, + p_brand STRING, + p_type STRING, + p_size INT, + p_container STRING, + p_retailprice DOUBLE, + p_comment STRING +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@part +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part +PREHOOK: type: LOAD +PREHOOK: Output: default@part +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part +POSTHOOK: type: LOAD +POSTHOOK: Output: default@part +PREHOOK: query: --1. test1 +select p_mfgr, p_name, p_size, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 +from noop(on part + partition by p_mfgr + order by p_name + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: --1. 
test1 +select p_mfgr, p_name, p_size, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 +from noop(on part + partition by p_mfgr + order by p_name + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15 +Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3 +Manufacturer#1 almond antique chartreuse lavender yellow 34 3 2 4100.06 +Manufacturer#1 almond antique salmon chartreuse burlywood 6 4 3 5702.650000000001 +Manufacturer#1 almond aquamarine burnished black steel 28 5 4 7117.070000000001 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 6 5 8749.730000000001 +Manufacturer#2 almond antique violet chocolate turquoise 14 1 1 1690.68 +Manufacturer#2 almond antique violet turquoise frosted 40 2 2 3491.38 +Manufacturer#2 almond aquamarine midnight light salmon 2 3 3 5523.360000000001 +Manufacturer#2 almond aquamarine rose maroon antique 25 4 4 7222.02 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5 5 8923.62 +Manufacturer#3 almond antique chartreuse khaki white 17 1 1 1671.68 +Manufacturer#3 almond antique forest lavender goldenrod 14 2 2 2861.95 +Manufacturer#3 almond antique metallic orange dim 19 3 3 4272.34 +Manufacturer#3 almond antique misty red olive 1 4 4 6195.32 +Manufacturer#3 almond antique olive coral navajo 45 5 5 7532.61 +Manufacturer#4 almond antique gainsboro frosted violet 10 1 1 1620.67 +Manufacturer#4 almond antique violet mint lemon 39 2 2 2996.09 +Manufacturer#4 almond aquamarine floral ivory bisque 27 3 3 4202.35 +Manufacturer#4 almond aquamarine yellow dodger mint 7 4 4 6047.27 +Manufacturer#4 almond azure aquamarine papaya violet 12 5 5 7337.620000000001 +Manufacturer#5 almond antique blue firebrick mint 31 1 1 1789.69 +Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 +Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 +Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 +PREHOOK: query: -- 2. testJoinWithNoop +select p_mfgr, p_name, +p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz +from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j +distribute by j.p_mfgr +sort by j.p_name) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 2. 
testJoinWithNoop +select p_mfgr, p_name, +p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz +from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j +distribute by j.p_mfgr +sort by j.p_name) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 almond antique burnished rose metallic 2 0 +Manufacturer#1 almond antique burnished rose metallic 2 0 +Manufacturer#1 almond antique burnished rose metallic 2 0 +Manufacturer#1 almond antique burnished rose metallic 2 0 +Manufacturer#1 almond antique chartreuse lavender yellow 34 32 +Manufacturer#1 almond antique salmon chartreuse burlywood 6 -28 +Manufacturer#1 almond aquamarine burnished black steel 28 22 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 14 +Manufacturer#2 almond antique violet chocolate turquoise 14 0 +Manufacturer#2 almond antique violet turquoise frosted 40 26 +Manufacturer#2 almond aquamarine midnight light salmon 2 -38 +Manufacturer#2 almond aquamarine rose maroon antique 25 23 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 -7 +Manufacturer#3 almond antique chartreuse khaki white 17 0 +Manufacturer#3 almond antique forest lavender goldenrod 14 -3 +Manufacturer#3 almond antique metallic orange dim 19 5 +Manufacturer#3 almond antique misty red olive 1 -18 +Manufacturer#3 almond antique olive coral navajo 45 44 +Manufacturer#4 almond antique gainsboro frosted violet 10 0 +Manufacturer#4 almond antique violet mint lemon 39 29 +Manufacturer#4 almond aquamarine floral ivory bisque 27 -12 +Manufacturer#4 almond aquamarine yellow dodger mint 7 -20 +Manufacturer#4 almond azure aquamarine papaya violet 12 5 +Manufacturer#5 almond antique blue firebrick mint 31 0 +Manufacturer#5 almond antique medium spring khaki 6 -25 +Manufacturer#5 almond antique sky peru orange 2 -4 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 44 +Manufacturer#5 almond azure blanched chiffon midnight 23 -23 +PREHOOK: query: -- 3. testOnlyPTF +select p_mfgr, p_name, p_size +from noop(on part +partition by p_mfgr +order by p_name) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 3. 
testOnlyPTF +select p_mfgr, p_name, p_size +from noop(on part +partition by p_mfgr +order by p_name) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 almond antique burnished rose metallic 2 +Manufacturer#1 almond antique burnished rose metallic 2 +Manufacturer#1 almond antique chartreuse lavender yellow 34 +Manufacturer#1 almond antique salmon chartreuse burlywood 6 +Manufacturer#1 almond aquamarine burnished black steel 28 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 +Manufacturer#2 almond antique violet chocolate turquoise 14 +Manufacturer#2 almond antique violet turquoise frosted 40 +Manufacturer#2 almond aquamarine midnight light salmon 2 +Manufacturer#2 almond aquamarine rose maroon antique 25 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 +Manufacturer#3 almond antique chartreuse khaki white 17 +Manufacturer#3 almond antique forest lavender goldenrod 14 +Manufacturer#3 almond antique metallic orange dim 19 +Manufacturer#3 almond antique misty red olive 1 +Manufacturer#3 almond antique olive coral navajo 45 +Manufacturer#4 almond antique gainsboro frosted violet 10 +Manufacturer#4 almond antique violet mint lemon 39 +Manufacturer#4 almond aquamarine floral ivory bisque 27 +Manufacturer#4 almond aquamarine yellow dodger mint 7 +Manufacturer#4 almond azure aquamarine papaya violet 12 +Manufacturer#5 almond antique blue firebrick mint 31 +Manufacturer#5 almond antique medium spring khaki 6 +Manufacturer#5 almond antique sky peru orange 2 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 +Manufacturer#5 almond azure blanched chiffon midnight 23 +PREHOOK: query: -- 4. testPTFAlias +select p_mfgr, p_name, p_size, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 +from noop(on part + partition by p_mfgr + order by p_name + ) abc +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 4. 
testPTFAlias +select p_mfgr, p_name, p_size, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 +from noop(on part + partition by p_mfgr + order by p_name + ) abc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15 +Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3 +Manufacturer#1 almond antique chartreuse lavender yellow 34 3 2 4100.06 +Manufacturer#1 almond antique salmon chartreuse burlywood 6 4 3 5702.650000000001 +Manufacturer#1 almond aquamarine burnished black steel 28 5 4 7117.070000000001 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 6 5 8749.730000000001 +Manufacturer#2 almond antique violet chocolate turquoise 14 1 1 1690.68 +Manufacturer#2 almond antique violet turquoise frosted 40 2 2 3491.38 +Manufacturer#2 almond aquamarine midnight light salmon 2 3 3 5523.360000000001 +Manufacturer#2 almond aquamarine rose maroon antique 25 4 4 7222.02 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5 5 8923.62 +Manufacturer#3 almond antique chartreuse khaki white 17 1 1 1671.68 +Manufacturer#3 almond antique forest lavender goldenrod 14 2 2 2861.95 +Manufacturer#3 almond antique metallic orange dim 19 3 3 4272.34 +Manufacturer#3 almond antique misty red olive 1 4 4 6195.32 +Manufacturer#3 almond antique olive coral navajo 45 5 5 7532.61 +Manufacturer#4 almond antique gainsboro frosted violet 10 1 1 1620.67 +Manufacturer#4 almond antique violet mint lemon 39 2 2 2996.09 +Manufacturer#4 almond aquamarine floral ivory bisque 27 3 3 4202.35 +Manufacturer#4 almond aquamarine yellow dodger mint 7 4 4 6047.27 +Manufacturer#4 almond azure aquamarine papaya violet 12 5 5 7337.620000000001 +Manufacturer#5 almond antique blue firebrick mint 31 1 1 1789.69 +Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 +Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 +Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 +PREHOOK: query: -- 5. testPTFAndWhereWithWindowing +select p_mfgr, p_name, p_size, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz +from noop(on part + partition by p_mfgr + order by p_name + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 5. 
testPTFAndWhereWithWindowing +select p_mfgr, p_name, p_size, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz +from noop(on part + partition by p_mfgr + order by p_name + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 almond antique burnished rose metallic 2 1 1 2 0 +Manufacturer#1 almond antique burnished rose metallic 2 1 1 2 0 +Manufacturer#1 almond antique chartreuse lavender yellow 34 3 2 34 32 +Manufacturer#1 almond antique salmon chartreuse burlywood 6 4 3 6 -28 +Manufacturer#1 almond aquamarine burnished black steel 28 5 4 28 22 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 6 5 42 14 +Manufacturer#2 almond antique violet chocolate turquoise 14 1 1 14 0 +Manufacturer#2 almond antique violet turquoise frosted 40 2 2 40 26 +Manufacturer#2 almond aquamarine midnight light salmon 2 3 3 2 -38 +Manufacturer#2 almond aquamarine rose maroon antique 25 4 4 25 23 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5 5 18 -7 +Manufacturer#3 almond antique chartreuse khaki white 17 1 1 17 0 +Manufacturer#3 almond antique forest lavender goldenrod 14 2 2 14 -3 +Manufacturer#3 almond antique metallic orange dim 19 3 3 19 5 +Manufacturer#3 almond antique misty red olive 1 4 4 1 -18 +Manufacturer#3 almond antique olive coral navajo 45 5 5 45 44 +Manufacturer#4 almond antique gainsboro frosted violet 10 1 1 10 0 +Manufacturer#4 almond antique violet mint lemon 39 2 2 39 29 +Manufacturer#4 almond aquamarine floral ivory bisque 27 3 3 27 -12 +Manufacturer#4 almond aquamarine yellow dodger mint 7 4 4 7 -20 +Manufacturer#4 almond azure aquamarine papaya violet 12 5 5 12 5 +Manufacturer#5 almond antique blue firebrick mint 31 1 1 31 0 +Manufacturer#5 almond antique medium spring khaki 6 2 2 6 -25 +Manufacturer#5 almond antique sky peru orange 2 3 3 2 -4 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 46 44 +Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 23 -23 +PREHOOK: query: -- 6. testSWQAndPTFAndGBy +select p_mfgr, p_name, p_size, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz +from noop(on part + partition by p_mfgr + order by p_name + ) +group by p_mfgr, p_name, p_size +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 6. 
testSWQAndPTFAndGBy +select p_mfgr, p_name, p_size, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz +from noop(on part + partition by p_mfgr + order by p_name + ) +group by p_mfgr, p_name, p_size +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 almond antique burnished rose metallic 2 1 1 2 0 +Manufacturer#1 almond antique chartreuse lavender yellow 34 2 2 34 32 +Manufacturer#1 almond antique salmon chartreuse burlywood 6 3 3 6 -28 +Manufacturer#1 almond aquamarine burnished black steel 28 4 4 28 22 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 5 5 42 14 +Manufacturer#2 almond antique violet chocolate turquoise 14 1 1 14 0 +Manufacturer#2 almond antique violet turquoise frosted 40 2 2 40 26 +Manufacturer#2 almond aquamarine midnight light salmon 2 3 3 2 -38 +Manufacturer#2 almond aquamarine rose maroon antique 25 4 4 25 23 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5 5 18 -7 +Manufacturer#3 almond antique chartreuse khaki white 17 1 1 17 0 +Manufacturer#3 almond antique forest lavender goldenrod 14 2 2 14 -3 +Manufacturer#3 almond antique metallic orange dim 19 3 3 19 5 +Manufacturer#3 almond antique misty red olive 1 4 4 1 -18 +Manufacturer#3 almond antique olive coral navajo 45 5 5 45 44 +Manufacturer#4 almond antique gainsboro frosted violet 10 1 1 10 0 +Manufacturer#4 almond antique violet mint lemon 39 2 2 39 29 +Manufacturer#4 almond aquamarine floral ivory bisque 27 3 3 27 -12 +Manufacturer#4 almond aquamarine yellow dodger mint 7 4 4 7 -20 +Manufacturer#4 almond azure aquamarine papaya violet 12 5 5 12 5 +Manufacturer#5 almond antique blue firebrick mint 31 1 1 31 0 +Manufacturer#5 almond antique medium spring khaki 6 2 2 6 -25 +Manufacturer#5 almond antique sky peru orange 2 3 3 2 -4 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 46 44 +Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 23 -23 +PREHOOK: query: -- 7. testJoin +select abc.* +from noop(on part +partition by p_mfgr +order by p_name +) abc join part p1 on abc.p_partkey = p1.p_partkey +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 7. testJoin +select abc.* +from noop(on part +partition by p_mfgr +order by p_name +) abc join part p1 on abc.p_partkey = p1.p_partkey +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +15103 almond aquamarine dodger light gainsboro Manufacturer#5 Brand#53 ECONOMY BURNISHED STEEL 46 LG PACK 1018.1 packages hinder carefu +17273 almond antique forest lavender goldenrod Manufacturer#3 Brand#35 PROMO ANODIZED TIN 14 JUMBO CASE 1190.27 along the +17927 almond aquamarine yellow dodger mint Manufacturer#4 Brand#41 ECONOMY BRUSHED COPPER 7 SM PKG 1844.92 ites. 
eve +33357 almond azure aquamarine papaya violet Manufacturer#4 Brand#41 STANDARD ANODIZED TIN 12 WRAP CASE 1290.35 reful +40982 almond antique misty red olive Manufacturer#3 Brand#32 ECONOMY PLATED COPPER 1 LG PKG 1922.98 c foxes can s +42669 almond antique medium spring khaki Manufacturer#5 Brand#51 STANDARD BURNISHED TIN 6 MED CAN 1611.66 sits haggl +45261 almond aquamarine floral ivory bisque Manufacturer#4 Brand#42 SMALL PLATED STEEL 27 WRAP CASE 1206.26 careful +48427 almond antique violet mint lemon Manufacturer#4 Brand#42 PROMO POLISHED STEEL 39 SM CASE 1375.42 hely ironic i +49671 almond antique gainsboro frosted violet Manufacturer#4 Brand#41 SMALL BRUSHED BRASS 10 SM BOX 1620.67 ccounts run quick +65667 almond aquamarine pink moccasin thistle Manufacturer#1 Brand#12 LARGE BURNISHED STEEL 42 JUMBO CASE 1632.66 e across the expr +78486 almond azure blanched chiffon midnight Manufacturer#5 Brand#52 LARGE BRUSHED BRASS 23 MED BAG 1464.48 hely blith +85768 almond antique chartreuse lavender yellow Manufacturer#1 Brand#12 LARGE BRUSHED STEEL 34 SM BAG 1753.76 refull +86428 almond aquamarine burnished black steel Manufacturer#1 Brand#12 STANDARD ANODIZED STEEL 28 WRAP BAG 1414.42 arefully +90681 almond antique chartreuse khaki white Manufacturer#3 Brand#31 MEDIUM BURNISHED TIN 17 SM CASE 1671.68 are slyly after the sl +105685 almond antique violet chocolate turquoise Manufacturer#2 Brand#22 MEDIUM ANODIZED COPPER 14 MED CAN 1690.68 ly pending requ +110592 almond antique salmon chartreuse burlywood Manufacturer#1 Brand#15 PROMO BURNISHED NICKEL 6 JUMBO PKG 1602.59 to the furiously +112398 almond antique metallic orange dim Manufacturer#3 Brand#32 MEDIUM BURNISHED BRASS 19 JUMBO JAR 1410.39 ole car +121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h +121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h +121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h +121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h +132666 almond aquamarine rose maroon antique Manufacturer#2 Brand#24 SMALL POLISHED NICKEL 25 MED BOX 1698.66 even +144293 almond antique olive coral navajo Manufacturer#3 Brand#34 STANDARD POLISHED STEEL 45 JUMBO CAN 1337.29 ag furiously about +146985 almond aquamarine midnight light salmon Manufacturer#2 Brand#23 MEDIUM BURNISHED COPPER 2 SM CASE 2031.98 s cajole caref +155733 almond antique sky peru orange Manufacturer#5 Brand#53 SMALL PLATED BRASS 2 WRAP DRUM 1788.73 furiously. bra +191709 almond antique violet turquoise frosted Manufacturer#2 Brand#22 ECONOMY POLISHED STEEL 40 MED BOX 1800.7 haggle +192697 almond antique blue firebrick mint Manufacturer#5 Brand#52 MEDIUM BURNISHED TIN 31 LG DRUM 1789.69 ickly ir +195606 almond aquamarine sandy cyan gainsboro Manufacturer#2 Brand#25 STANDARD PLATED TIN 18 SM PKG 1701.6 ic de +PREHOOK: query: -- 8. testJoinRight +select abc.* +from part p1 join noop(on part +partition by p_mfgr +order by p_name +) abc on abc.p_partkey = p1.p_partkey +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 8. 
testJoinRight +select abc.* +from part p1 join noop(on part +partition by p_mfgr +order by p_name +) abc on abc.p_partkey = p1.p_partkey +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +15103 almond aquamarine dodger light gainsboro Manufacturer#5 Brand#53 ECONOMY BURNISHED STEEL 46 LG PACK 1018.1 packages hinder carefu +17273 almond antique forest lavender goldenrod Manufacturer#3 Brand#35 PROMO ANODIZED TIN 14 JUMBO CASE 1190.27 along the +17927 almond aquamarine yellow dodger mint Manufacturer#4 Brand#41 ECONOMY BRUSHED COPPER 7 SM PKG 1844.92 ites. eve +33357 almond azure aquamarine papaya violet Manufacturer#4 Brand#41 STANDARD ANODIZED TIN 12 WRAP CASE 1290.35 reful +40982 almond antique misty red olive Manufacturer#3 Brand#32 ECONOMY PLATED COPPER 1 LG PKG 1922.98 c foxes can s +42669 almond antique medium spring khaki Manufacturer#5 Brand#51 STANDARD BURNISHED TIN 6 MED CAN 1611.66 sits haggl +45261 almond aquamarine floral ivory bisque Manufacturer#4 Brand#42 SMALL PLATED STEEL 27 WRAP CASE 1206.26 careful +48427 almond antique violet mint lemon Manufacturer#4 Brand#42 PROMO POLISHED STEEL 39 SM CASE 1375.42 hely ironic i +49671 almond antique gainsboro frosted violet Manufacturer#4 Brand#41 SMALL BRUSHED BRASS 10 SM BOX 1620.67 ccounts run quick +65667 almond aquamarine pink moccasin thistle Manufacturer#1 Brand#12 LARGE BURNISHED STEEL 42 JUMBO CASE 1632.66 e across the expr +78486 almond azure blanched chiffon midnight Manufacturer#5 Brand#52 LARGE BRUSHED BRASS 23 MED BAG 1464.48 hely blith +85768 almond antique chartreuse lavender yellow Manufacturer#1 Brand#12 LARGE BRUSHED STEEL 34 SM BAG 1753.76 refull +86428 almond aquamarine burnished black steel Manufacturer#1 Brand#12 STANDARD ANODIZED STEEL 28 WRAP BAG 1414.42 arefully +90681 almond antique chartreuse khaki white Manufacturer#3 Brand#31 MEDIUM BURNISHED TIN 17 SM CASE 1671.68 are slyly after the sl +105685 almond antique violet chocolate turquoise Manufacturer#2 Brand#22 MEDIUM ANODIZED COPPER 14 MED CAN 1690.68 ly pending requ +110592 almond antique salmon chartreuse burlywood Manufacturer#1 Brand#15 PROMO BURNISHED NICKEL 6 JUMBO PKG 1602.59 to the furiously +112398 almond antique metallic orange dim Manufacturer#3 Brand#32 MEDIUM BURNISHED BRASS 19 JUMBO JAR 1410.39 ole car +121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h +121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h +121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h +121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h +132666 almond aquamarine rose maroon antique Manufacturer#2 Brand#24 SMALL POLISHED NICKEL 25 MED BOX 1698.66 even +144293 almond antique olive coral navajo Manufacturer#3 Brand#34 STANDARD POLISHED STEEL 45 JUMBO CAN 1337.29 ag furiously about +146985 almond aquamarine midnight light salmon Manufacturer#2 Brand#23 MEDIUM BURNISHED COPPER 2 SM CASE 2031.98 s cajole caref +155733 almond antique sky peru orange Manufacturer#5 Brand#53 SMALL PLATED BRASS 2 WRAP DRUM 1788.73 furiously. 
bra +191709 almond antique violet turquoise frosted Manufacturer#2 Brand#22 ECONOMY POLISHED STEEL 40 MED BOX 1800.7 haggle +192697 almond antique blue firebrick mint Manufacturer#5 Brand#52 MEDIUM BURNISHED TIN 31 LG DRUM 1789.69 ickly ir +195606 almond aquamarine sandy cyan gainsboro Manufacturer#2 Brand#25 STANDARD PLATED TIN 18 SM PKG 1701.6 ic de +PREHOOK: query: -- 9. testNoopWithMap +select p_mfgr, p_name, p_size, +rank() over (partition by p_mfgr order by p_name, p_size desc) as r +from noopwithmap(on part +partition by p_mfgr +order by p_name, p_size desc) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 9. testNoopWithMap +select p_mfgr, p_name, p_size, +rank() over (partition by p_mfgr order by p_name, p_size desc) as r +from noopwithmap(on part +partition by p_mfgr +order by p_name, p_size desc) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 almond antique burnished rose metallic 2 1 +Manufacturer#1 almond antique burnished rose metallic 2 1 +Manufacturer#1 almond antique chartreuse lavender yellow 34 3 +Manufacturer#1 almond antique salmon chartreuse burlywood 6 4 +Manufacturer#1 almond aquamarine burnished black steel 28 5 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 6 +Manufacturer#2 almond antique violet chocolate turquoise 14 1 +Manufacturer#2 almond antique violet turquoise frosted 40 2 +Manufacturer#2 almond aquamarine midnight light salmon 2 3 +Manufacturer#2 almond aquamarine rose maroon antique 25 4 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5 +Manufacturer#3 almond antique chartreuse khaki white 17 1 +Manufacturer#3 almond antique forest lavender goldenrod 14 2 +Manufacturer#3 almond antique metallic orange dim 19 3 +Manufacturer#3 almond antique misty red olive 1 4 +Manufacturer#3 almond antique olive coral navajo 45 5 +Manufacturer#4 almond antique gainsboro frosted violet 10 1 +Manufacturer#4 almond antique violet mint lemon 39 2 +Manufacturer#4 almond aquamarine floral ivory bisque 27 3 +Manufacturer#4 almond aquamarine yellow dodger mint 7 4 +Manufacturer#4 almond azure aquamarine papaya violet 12 5 +Manufacturer#5 almond antique blue firebrick mint 31 1 +Manufacturer#5 almond antique medium spring khaki 6 2 +Manufacturer#5 almond antique sky peru orange 2 3 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 +Manufacturer#5 almond azure blanched chiffon midnight 23 5 +PREHOOK: query: -- 10. testNoopWithMapWithWindowing +select p_mfgr, p_name, p_size, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 +from noopwithmap(on part + partition by p_mfgr + order by p_name) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 10. 
testNoopWithMapWithWindowing +select p_mfgr, p_name, p_size, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 +from noopwithmap(on part + partition by p_mfgr + order by p_name) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15 +Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3 +Manufacturer#1 almond antique chartreuse lavender yellow 34 3 2 4100.06 +Manufacturer#1 almond antique salmon chartreuse burlywood 6 4 3 5702.650000000001 +Manufacturer#1 almond aquamarine burnished black steel 28 5 4 7117.070000000001 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 6 5 8749.730000000001 +Manufacturer#2 almond antique violet chocolate turquoise 14 1 1 1690.68 +Manufacturer#2 almond antique violet turquoise frosted 40 2 2 3491.38 +Manufacturer#2 almond aquamarine midnight light salmon 2 3 3 5523.360000000001 +Manufacturer#2 almond aquamarine rose maroon antique 25 4 4 7222.02 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5 5 8923.62 +Manufacturer#3 almond antique chartreuse khaki white 17 1 1 1671.68 +Manufacturer#3 almond antique forest lavender goldenrod 14 2 2 2861.95 +Manufacturer#3 almond antique metallic orange dim 19 3 3 4272.34 +Manufacturer#3 almond antique misty red olive 1 4 4 6195.32 +Manufacturer#3 almond antique olive coral navajo 45 5 5 7532.61 +Manufacturer#4 almond antique gainsboro frosted violet 10 1 1 1620.67 +Manufacturer#4 almond antique violet mint lemon 39 2 2 2996.09 +Manufacturer#4 almond aquamarine floral ivory bisque 27 3 3 4202.35 +Manufacturer#4 almond aquamarine yellow dodger mint 7 4 4 6047.27 +Manufacturer#4 almond azure aquamarine papaya violet 12 5 5 7337.620000000001 +Manufacturer#5 almond antique blue firebrick mint 31 1 1 1789.69 +Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 +Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 +Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 +PREHOOK: query: -- 11. testHavingWithWindowingPTFNoGBY +select p_mfgr, p_name, p_size, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 +from noop(on part +partition by p_mfgr +order by p_name) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 11. 
testHavingWithWindowingPTFNoGBY +select p_mfgr, p_name, p_size, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 +from noop(on part +partition by p_mfgr +order by p_name) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15 +Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3 +Manufacturer#1 almond antique chartreuse lavender yellow 34 3 2 4100.06 +Manufacturer#1 almond antique salmon chartreuse burlywood 6 4 3 5702.650000000001 +Manufacturer#1 almond aquamarine burnished black steel 28 5 4 7117.070000000001 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 6 5 8749.730000000001 +Manufacturer#2 almond antique violet chocolate turquoise 14 1 1 1690.68 +Manufacturer#2 almond antique violet turquoise frosted 40 2 2 3491.38 +Manufacturer#2 almond aquamarine midnight light salmon 2 3 3 5523.360000000001 +Manufacturer#2 almond aquamarine rose maroon antique 25 4 4 7222.02 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5 5 8923.62 +Manufacturer#3 almond antique chartreuse khaki white 17 1 1 1671.68 +Manufacturer#3 almond antique forest lavender goldenrod 14 2 2 2861.95 +Manufacturer#3 almond antique metallic orange dim 19 3 3 4272.34 +Manufacturer#3 almond antique misty red olive 1 4 4 6195.32 +Manufacturer#3 almond antique olive coral navajo 45 5 5 7532.61 +Manufacturer#4 almond antique gainsboro frosted violet 10 1 1 1620.67 +Manufacturer#4 almond antique violet mint lemon 39 2 2 2996.09 +Manufacturer#4 almond aquamarine floral ivory bisque 27 3 3 4202.35 +Manufacturer#4 almond aquamarine yellow dodger mint 7 4 4 6047.27 +Manufacturer#4 almond azure aquamarine papaya violet 12 5 5 7337.620000000001 +Manufacturer#5 almond antique blue firebrick mint 31 1 1 1789.69 +Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 +Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 +Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 +PREHOOK: query: -- 12. testFunctionChain +select p_mfgr, p_name, p_size, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 +from noop(on noopwithmap(on noop(on part +partition by p_mfgr +order by p_mfgr, p_name +))) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 12. 
testFunctionChain +select p_mfgr, p_name, p_size, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 +from noop(on noopwithmap(on noop(on part +partition by p_mfgr +order by p_mfgr, p_name +))) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15 +Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3 +Manufacturer#1 almond antique chartreuse lavender yellow 34 3 2 4100.06 +Manufacturer#1 almond antique salmon chartreuse burlywood 6 4 3 5702.650000000001 +Manufacturer#1 almond aquamarine burnished black steel 28 5 4 7117.070000000001 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 6 5 8749.730000000001 +Manufacturer#2 almond antique violet chocolate turquoise 14 1 1 1690.68 +Manufacturer#2 almond antique violet turquoise frosted 40 2 2 3491.38 +Manufacturer#2 almond aquamarine midnight light salmon 2 3 3 5523.360000000001 +Manufacturer#2 almond aquamarine rose maroon antique 25 4 4 7222.02 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5 5 8923.62 +Manufacturer#3 almond antique chartreuse khaki white 17 1 1 1671.68 +Manufacturer#3 almond antique forest lavender goldenrod 14 2 2 2861.95 +Manufacturer#3 almond antique metallic orange dim 19 3 3 4272.34 +Manufacturer#3 almond antique misty red olive 1 4 4 6195.32 +Manufacturer#3 almond antique olive coral navajo 45 5 5 7532.61 +Manufacturer#4 almond antique gainsboro frosted violet 10 1 1 1620.67 +Manufacturer#4 almond antique violet mint lemon 39 2 2 2996.09 +Manufacturer#4 almond aquamarine floral ivory bisque 27 3 3 4202.35 +Manufacturer#4 almond aquamarine yellow dodger mint 7 4 4 6047.27 +Manufacturer#4 almond azure aquamarine papaya violet 12 5 5 7337.620000000001 +Manufacturer#5 almond antique blue firebrick mint 31 1 1 1789.69 +Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 +Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 +Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 +PREHOOK: query: -- 13. testPTFAndWindowingInSubQ +select p_mfgr, p_name, +sub1.cd, sub1.s1 +from (select p_mfgr, p_name, +count(p_size) over (partition by p_mfgr order by p_name) as cd, +p_retailprice, +sum(p_retailprice) over w1 as s1 +from noop(on part +partition by p_mfgr +order by p_name) +window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) +) sub1 +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 13. 
testPTFAndWindowingInSubQ +select p_mfgr, p_name, +sub1.cd, sub1.s1 +from (select p_mfgr, p_name, +count(p_size) over (partition by p_mfgr order by p_name) as cd, +p_retailprice, +sum(p_retailprice) over w1 as s1 +from noop(on part +partition by p_mfgr +order by p_name) +window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) +) sub1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 almond antique burnished rose metallic 2 4100.06 +Manufacturer#1 almond antique burnished rose metallic 2 5702.650000000001 +Manufacturer#1 almond antique chartreuse lavender yellow 3 7117.070000000001 +Manufacturer#1 almond antique salmon chartreuse burlywood 4 7576.58 +Manufacturer#1 almond aquamarine burnished black steel 5 6403.43 +Manufacturer#1 almond aquamarine pink moccasin thistle 6 4649.67 +Manufacturer#2 almond antique violet chocolate turquoise 1 5523.360000000001 +Manufacturer#2 almond antique violet turquoise frosted 2 7222.02 +Manufacturer#2 almond aquamarine midnight light salmon 3 8923.62 +Manufacturer#2 almond aquamarine rose maroon antique 4 7232.9400000000005 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 5 5432.24 +Manufacturer#3 almond antique chartreuse khaki white 1 4272.34 +Manufacturer#3 almond antique forest lavender goldenrod 2 6195.32 +Manufacturer#3 almond antique metallic orange dim 3 7532.61 +Manufacturer#3 almond antique misty red olive 4 5860.929999999999 +Manufacturer#3 almond antique olive coral navajo 5 4670.66 +Manufacturer#4 almond antique gainsboro frosted violet 1 4202.35 +Manufacturer#4 almond antique violet mint lemon 2 6047.27 +Manufacturer#4 almond aquamarine floral ivory bisque 3 7337.620000000001 +Manufacturer#4 almond aquamarine yellow dodger mint 4 5716.950000000001 +Manufacturer#4 almond azure aquamarine papaya violet 5 4341.530000000001 +Manufacturer#5 almond antique blue firebrick mint 1 5190.08 +Manufacturer#5 almond antique medium spring khaki 2 6208.18 +Manufacturer#5 almond antique sky peru orange 3 7672.66 +Manufacturer#5 almond aquamarine dodger light gainsboro 4 5882.970000000001 +Manufacturer#5 almond azure blanched chiffon midnight 5 4271.3099999999995 +PREHOOK: query: -- 14. testPTFJoinWithWindowingWithCount +select abc.p_mfgr, abc.p_name, +rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r, +dense_rank() over (distribute by abc.p_mfgr sort by abc.p_name) as dr, +count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd, +abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1, +abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz +from noop(on part +partition by p_mfgr +order by p_name +) abc join part p1 on abc.p_partkey = p1.p_partkey +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 14. 
testPTFJoinWithWindowingWithCount +select abc.p_mfgr, abc.p_name, +rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r, +dense_rank() over (distribute by abc.p_mfgr sort by abc.p_name) as dr, +count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd, +abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1, +abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz +from noop(on part +partition by p_mfgr +order by p_name +) abc join part p1 on abc.p_partkey = p1.p_partkey +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 almond antique burnished rose metallic 1 1 4 1173.15 1173.15 2 0 +Manufacturer#1 almond antique burnished rose metallic 1 1 4 1173.15 2346.3 2 0 +Manufacturer#1 almond antique burnished rose metallic 1 1 4 1173.15 3519.4500000000003 2 0 +Manufacturer#1 almond antique burnished rose metallic 1 1 4 1173.15 4692.6 2 0 +Manufacturer#1 almond antique chartreuse lavender yellow 5 2 5 1753.76 6446.360000000001 34 32 +Manufacturer#1 almond antique salmon chartreuse burlywood 6 3 6 1602.59 8048.950000000001 6 -28 +Manufacturer#1 almond aquamarine burnished black steel 7 4 7 1414.42 9463.37 28 22 +Manufacturer#1 almond aquamarine pink moccasin thistle 8 5 8 1632.66 11096.03 42 14 +Manufacturer#2 almond antique violet chocolate turquoise 1 1 1 1690.68 1690.68 14 0 +Manufacturer#2 almond antique violet turquoise frosted 2 2 2 1800.7 3491.38 40 26 +Manufacturer#2 almond aquamarine midnight light salmon 3 3 3 2031.98 5523.360000000001 2 -38 +Manufacturer#2 almond aquamarine rose maroon antique 4 4 4 1698.66 7222.02 25 23 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 5 5 5 1701.6 8923.62 18 -7 +Manufacturer#3 almond antique chartreuse khaki white 1 1 1 1671.68 1671.68 17 0 +Manufacturer#3 almond antique forest lavender goldenrod 2 2 2 1190.27 2861.95 14 -3 +Manufacturer#3 almond antique metallic orange dim 3 3 3 1410.39 4272.34 19 5 +Manufacturer#3 almond antique misty red olive 4 4 4 1922.98 6195.32 1 -18 +Manufacturer#3 almond antique olive coral navajo 5 5 5 1337.29 7532.61 45 44 +Manufacturer#4 almond antique gainsboro frosted violet 1 1 1 1620.67 1620.67 10 0 +Manufacturer#4 almond antique violet mint lemon 2 2 2 1375.42 2996.09 39 29 +Manufacturer#4 almond aquamarine floral ivory bisque 3 3 3 1206.26 4202.35 27 -12 +Manufacturer#4 almond aquamarine yellow dodger mint 4 4 4 1844.92 6047.27 7 -20 +Manufacturer#4 almond azure aquamarine papaya violet 5 5 5 1290.35 7337.620000000001 12 5 +Manufacturer#5 almond antique blue firebrick mint 1 1 1 1789.69 1789.69 31 0 +Manufacturer#5 almond antique medium spring khaki 2 2 2 1611.66 3401.3500000000004 6 -25 +Manufacturer#5 almond antique sky peru orange 3 3 3 1788.73 5190.08 2 -4 +Manufacturer#5 almond aquamarine dodger light gainsboro 4 4 4 1018.1 6208.18 46 44 +Manufacturer#5 almond azure blanched chiffon midnight 5 5 5 1464.48 7672.66 23 -23 +PREHOOK: query: -- 15. testDistinctInSelectWithPTF +select DISTINCT p_mfgr, p_name, p_size +from noop(on part +partition by p_mfgr +order by p_name) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 15. 
testDistinctInSelectWithPTF +select DISTINCT p_mfgr, p_name, p_size +from noop(on part +partition by p_mfgr +order by p_name) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 almond antique burnished rose metallic 2 +Manufacturer#1 almond antique chartreuse lavender yellow 34 +Manufacturer#1 almond antique salmon chartreuse burlywood 6 +Manufacturer#1 almond aquamarine burnished black steel 28 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 +Manufacturer#2 almond antique violet chocolate turquoise 14 +Manufacturer#2 almond antique violet turquoise frosted 40 +Manufacturer#2 almond aquamarine midnight light salmon 2 +Manufacturer#2 almond aquamarine rose maroon antique 25 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 +Manufacturer#3 almond antique chartreuse khaki white 17 +Manufacturer#3 almond antique forest lavender goldenrod 14 +Manufacturer#3 almond antique metallic orange dim 19 +Manufacturer#3 almond antique misty red olive 1 +Manufacturer#3 almond antique olive coral navajo 45 +Manufacturer#4 almond antique gainsboro frosted violet 10 +Manufacturer#4 almond antique violet mint lemon 39 +Manufacturer#4 almond aquamarine floral ivory bisque 27 +Manufacturer#4 almond aquamarine yellow dodger mint 7 +Manufacturer#4 almond azure aquamarine papaya violet 12 +Manufacturer#5 almond antique blue firebrick mint 31 +Manufacturer#5 almond antique medium spring khaki 6 +Manufacturer#5 almond antique sky peru orange 2 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 +Manufacturer#5 almond azure blanched chiffon midnight 23 +PREHOOK: query: -- 16. testViewAsTableInputToPTF +create view IF NOT EXISTS mfgr_price_view as +select p_mfgr, p_brand, +sum(p_retailprice) as s +from part +group by p_mfgr, p_brand +PREHOOK: type: CREATEVIEW +POSTHOOK: query: -- 16. testViewAsTableInputToPTF +create view IF NOT EXISTS mfgr_price_view as +select p_mfgr, p_brand, +sum(p_retailprice) as s +from part +group by p_mfgr, p_brand +POSTHOOK: type: CREATEVIEW +POSTHOOK: Output: default@mfgr_price_view +PREHOOK: query: select p_mfgr, p_brand, s, +sum(s) over w1 as s1 +from noop(on mfgr_price_view +partition by p_mfgr +order by p_mfgr) +window w1 as ( partition by p_mfgr order by p_brand rows between 2 preceding and current row) +PREHOOK: type: QUERY +PREHOOK: Input: default@mfgr_price_view +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: select p_mfgr, p_brand, s, +sum(s) over w1 as s1 +from noop(on mfgr_price_view +partition by p_mfgr +order by p_mfgr) +window w1 as ( partition by p_mfgr order by p_brand rows between 2 preceding and current row) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mfgr_price_view +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 Brand#12 4800.84 4800.84 +Manufacturer#1 Brand#14 2346.3 7147.14 +Manufacturer#1 Brand#15 1602.59 8749.73 +Manufacturer#2 Brand#22 3491.38 3491.38 +Manufacturer#2 Brand#23 2031.98 5523.360000000001 +Manufacturer#2 Brand#24 1698.66 7222.02 +Manufacturer#2 Brand#25 1701.6 5432.24 +Manufacturer#3 Brand#31 1671.68 1671.68 +Manufacturer#3 Brand#32 3333.37 5005.05 +Manufacturer#3 Brand#34 1337.29 6342.34 +Manufacturer#3 Brand#35 1190.27 5860.93 +Manufacturer#4 Brand#41 4755.9400000000005 4755.9400000000005 +Manufacturer#4 Brand#42 2581.6800000000003 7337.620000000001 +Manufacturer#5 Brand#51 1611.66 1611.66 +Manufacturer#5 Brand#52 3254.17 4865.83 +Manufacturer#5 Brand#53 2806.83 7672.66 +PREHOOK: query: -- 17. 
testMultipleInserts2SWQsWithPTF +CREATE TABLE part_4( +p_mfgr STRING, +p_name STRING, +p_size INT, +r INT, +dr INT, +s DOUBLE) +PREHOOK: type: CREATETABLE +POSTHOOK: query: -- 17. testMultipleInserts2SWQsWithPTF +CREATE TABLE part_4( +p_mfgr STRING, +p_name STRING, +p_size INT, +r INT, +dr INT, +s DOUBLE) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@part_4 +PREHOOK: query: CREATE TABLE part_5( +p_mfgr STRING, +p_name STRING, +p_size INT, +s2 INT, +r INT, +dr INT, +cud DOUBLE, +fv1 INT) +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE part_5( +p_mfgr STRING, +p_name STRING, +p_size INT, +s2 INT, +r INT, +dr INT, +cud DOUBLE, +fv1 INT) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@part_5 +PREHOOK: query: from noop(on part +partition by p_mfgr +order by p_name) +INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, p_size, +rank() over (distribute by p_mfgr sort by p_name) as r, +dense_rank() over (distribute by p_mfgr sort by p_name) as dr, +sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s +INSERT OVERWRITE TABLE part_5 select p_mfgr,p_name, p_size, +round(sum(p_size) over (distribute by p_mfgr sort by p_size range between 5 preceding and current row),1) as s2, +rank() over (distribute by p_mfgr sort by p_mfgr, p_name) as r, +dense_rank() over (distribute by p_mfgr sort by p_mfgr, p_name) as dr, +cume_dist() over (distribute by p_mfgr sort by p_mfgr, p_name) as cud, +first_value(p_size, true) over w1 as fv1 +window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +PREHOOK: Output: default@part_4 +PREHOOK: Output: default@part_5 +POSTHOOK: query: from noop(on part +partition by p_mfgr +order by p_name) +INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, p_size, +rank() over (distribute by p_mfgr sort by p_name) as r, +dense_rank() over (distribute by p_mfgr sort by p_name) as dr, +sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s +INSERT OVERWRITE TABLE part_5 select p_mfgr,p_name, p_size, +round(sum(p_size) over (distribute by p_mfgr sort by p_size range between 5 preceding and current row),1) as s2, +rank() over (distribute by p_mfgr sort by p_mfgr, p_name) as r, +dense_rank() over (distribute by p_mfgr sort by p_mfgr, p_name) as dr, +cume_dist() over (distribute by p_mfgr sort by p_mfgr, p_name) as cud, +first_value(p_size, true) over w1 as fv1 +window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +POSTHOOK: Output: default@part_4 +POSTHOOK: Output: default@part_5 +POSTHOOK: Lineage: part_4.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] 
+POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), 
(part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_name SCRIPT 
[(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +PREHOOK: query: select * from part_4 +PREHOOK: type: QUERY +PREHOOK: Input: default@part_4 +#### A masked pattern was here #### +POSTHOOK: query: select * from part_4 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part_4 +#### A masked pattern was here #### +POSTHOOK: Lineage: part_4.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, 
type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, 
comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), 
(part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), 
(part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15 +Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3 +Manufacturer#1 almond antique chartreuse lavender yellow 34 3 2 4100.06 +Manufacturer#1 almond antique salmon chartreuse burlywood 6 4 3 5702.650000000001 +Manufacturer#1 almond aquamarine burnished black steel 28 5 4 7117.070000000001 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 6 5 8749.730000000001 +Manufacturer#2 almond antique violet chocolate turquoise 14 1 1 1690.68 +Manufacturer#2 almond antique violet turquoise frosted 40 2 2 3491.38 +Manufacturer#2 almond aquamarine midnight light salmon 2 3 3 5523.360000000001 +Manufacturer#2 almond aquamarine rose maroon antique 25 4 4 7222.02 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5 5 8923.62 +Manufacturer#3 almond antique chartreuse khaki white 17 1 1 1671.68 +Manufacturer#3 almond antique forest lavender goldenrod 14 2 2 2861.95 +Manufacturer#3 almond antique metallic orange dim 19 3 3 4272.34 +Manufacturer#3 almond antique misty red olive 1 4 4 6195.32 +Manufacturer#3 almond antique olive coral navajo 45 5 5 7532.61 +Manufacturer#4 almond antique gainsboro frosted violet 10 1 1 1620.67 +Manufacturer#4 almond antique violet mint lemon 39 2 2 2996.09 +Manufacturer#4 almond aquamarine floral ivory bisque 27 3 3 4202.35 +Manufacturer#4 almond aquamarine yellow dodger mint 7 4 4 6047.27 +Manufacturer#4 almond azure aquamarine papaya violet 12 5 5 7337.620000000001 +Manufacturer#5 almond antique blue firebrick mint 31 1 1 1789.69 +Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 +Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 +Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 +PREHOOK: query: select * from part_5 +PREHOOK: type: QUERY +PREHOOK: Input: default@part_5 +#### A masked pattern was here #### +POSTHOOK: query: select * from part_5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part_5 +#### A masked pattern was here #### +POSTHOOK: Lineage: part_4.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), 
(part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), 
(part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), 
(part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +Manufacturer#1 almond antique burnished rose metallic 2 4 1 1 0.3333333333333333 2 +Manufacturer#1 almond antique burnished rose metallic 2 4 1 1 0.3333333333333333 2 +Manufacturer#1 almond antique chartreuse lavender yellow 34 34 3 2 0.5 2 +Manufacturer#1 almond antique salmon chartreuse burlywood 6 10 4 3 0.6666666666666666 2 +Manufacturer#1 almond aquamarine burnished black steel 28 28 5 4 0.8333333333333334 34 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 42 6 5 1.0 6 +Manufacturer#2 almond antique violet chocolate turquoise 14 14 1 1 0.2 14 +Manufacturer#2 almond antique violet turquoise frosted 40 40 2 2 0.4 14 +Manufacturer#2 almond aquamarine midnight light salmon 2 2 3 3 0.6 14 +Manufacturer#2 almond aquamarine rose maroon antique 25 25 4 4 0.8 40 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 32 5 5 1.0 2 +Manufacturer#3 almond antique chartreuse khaki white 17 31 1 1 
0.2 17 +Manufacturer#3 almond antique forest lavender goldenrod 14 14 2 2 0.4 17 +Manufacturer#3 almond antique metallic orange dim 19 50 3 3 0.6 17 +Manufacturer#3 almond antique misty red olive 1 1 4 4 0.8 14 +Manufacturer#3 almond antique olive coral navajo 45 45 5 5 1.0 19 +Manufacturer#4 almond antique gainsboro frosted violet 10 17 1 1 0.2 10 +Manufacturer#4 almond antique violet mint lemon 39 39 2 2 0.4 10 +Manufacturer#4 almond aquamarine floral ivory bisque 27 27 3 3 0.6 10 +Manufacturer#4 almond aquamarine yellow dodger mint 7 7 4 4 0.8 39 +Manufacturer#4 almond azure aquamarine papaya violet 12 29 5 5 1.0 27 +Manufacturer#5 almond antique blue firebrick mint 31 31 1 1 0.2 31 +Manufacturer#5 almond antique medium spring khaki 6 8 2 2 0.4 31 +Manufacturer#5 almond antique sky peru orange 2 2 3 3 0.6 31 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 46 4 4 0.8 6 +Manufacturer#5 almond azure blanched chiffon midnight 23 23 5 5 1.0 2 +PREHOOK: query: -- 18. testMulti2OperatorsFunctionChainWithMap +select p_mfgr, p_name, +rank() over (partition by p_mfgr,p_name) as r, +dense_rank() over (partition by p_mfgr,p_name) as dr, +p_size, sum(p_size) over (partition by p_mfgr,p_name rows between unbounded preceding and current row) as s1 +from noop(on + noopwithmap(on + noop(on + noop(on part + partition by p_mfgr + order by p_mfgr) + ) + partition by p_mfgr,p_name + order by p_mfgr,p_name) + partition by p_mfgr,p_name + order by p_mfgr,p_name) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 18. testMulti2OperatorsFunctionChainWithMap +select p_mfgr, p_name, +rank() over (partition by p_mfgr,p_name) as r, +dense_rank() over (partition by p_mfgr,p_name) as dr, +p_size, sum(p_size) over (partition by p_mfgr,p_name rows between unbounded preceding and current row) as s1 +from noop(on + noopwithmap(on + noop(on + noop(on part + partition by p_mfgr + order by p_mfgr) + ) + partition by p_mfgr,p_name + order by p_mfgr,p_name) + partition by p_mfgr,p_name + order by p_mfgr,p_name) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: Lineage: part_4.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), 
(part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, 
type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, 
comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +Manufacturer#1 almond antique burnished rose metallic 1 1 2 2 +Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 +Manufacturer#1 almond antique chartreuse lavender yellow 1 1 34 34 +Manufacturer#1 almond antique salmon chartreuse burlywood 1 1 6 6 +Manufacturer#1 almond aquamarine burnished black steel 1 1 28 28 +Manufacturer#1 almond aquamarine pink moccasin thistle 1 1 42 42 +Manufacturer#2 almond antique violet chocolate turquoise 1 1 14 14 +Manufacturer#2 almond antique violet turquoise frosted 1 1 40 40 +Manufacturer#2 almond aquamarine midnight light salmon 1 1 2 2 +Manufacturer#2 almond aquamarine rose maroon antique 1 1 25 25 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 1 1 18 18 +Manufacturer#3 almond antique chartreuse khaki white 1 1 17 17 +Manufacturer#3 almond antique forest lavender goldenrod 1 1 14 14 +Manufacturer#3 almond antique metallic orange dim 1 1 19 19 +Manufacturer#3 almond antique misty red olive 1 1 1 1 +Manufacturer#3 almond antique olive coral navajo 1 1 45 45 
+Manufacturer#4 almond antique gainsboro frosted violet 1 1 10 10 +Manufacturer#4 almond antique violet mint lemon 1 1 39 39 +Manufacturer#4 almond aquamarine floral ivory bisque 1 1 27 27 +Manufacturer#4 almond aquamarine yellow dodger mint 1 1 7 7 +Manufacturer#4 almond azure aquamarine papaya violet 1 1 12 12 +Manufacturer#5 almond antique blue firebrick mint 1 1 31 31 +Manufacturer#5 almond antique medium spring khaki 1 1 6 6 +Manufacturer#5 almond antique sky peru orange 1 1 2 2 +Manufacturer#5 almond aquamarine dodger light gainsboro 1 1 46 46 +Manufacturer#5 almond azure blanched chiffon midnight 1 1 23 23 +PREHOOK: query: -- 19. testMulti3OperatorsFunctionChain +select p_mfgr, p_name, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +p_size, sum(p_size) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 +from noop(on + noop(on + noop(on + noop(on part + partition by p_mfgr + order by p_mfgr) + ) + partition by p_mfgr,p_name + order by p_mfgr,p_name) + partition by p_mfgr + order by p_mfgr ) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 19. testMulti3OperatorsFunctionChain +select p_mfgr, p_name, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +p_size, sum(p_size) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 +from noop(on + noop(on + noop(on + noop(on part + partition by p_mfgr + order by p_mfgr) + ) + partition by p_mfgr,p_name + order by p_mfgr,p_name) + partition by p_mfgr + order by p_mfgr ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: Lineage: part_4.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, 
comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), 
(part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), 
(part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +Manufacturer#1 almond antique burnished rose metallic 1 1 2 2 +Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 +Manufacturer#1 almond antique chartreuse lavender yellow 3 2 34 38 +Manufacturer#1 almond antique salmon chartreuse burlywood 4 3 6 44 +Manufacturer#1 almond aquamarine burnished black steel 5 4 28 72 +Manufacturer#1 almond aquamarine pink moccasin thistle 6 5 42 114 +Manufacturer#2 almond antique violet chocolate turquoise 1 1 14 14 +Manufacturer#2 almond antique violet turquoise frosted 2 2 40 54 +Manufacturer#2 almond aquamarine midnight light salmon 3 3 2 56 +Manufacturer#2 almond aquamarine rose maroon antique 4 4 25 81 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 5 5 18 99 +Manufacturer#3 almond antique chartreuse khaki white 1 1 17 17 +Manufacturer#3 almond antique forest lavender goldenrod 2 2 14 31 +Manufacturer#3 almond antique metallic orange dim 3 3 19 50 +Manufacturer#3 almond antique misty red olive 4 4 1 51 +Manufacturer#3 almond antique olive coral navajo 5 5 45 96 +Manufacturer#4 almond antique gainsboro frosted violet 1 1 10 10 +Manufacturer#4 almond antique violet mint lemon 2 2 39 49 +Manufacturer#4 almond aquamarine floral ivory bisque 3 3 27 76 +Manufacturer#4 almond aquamarine yellow dodger mint 4 4 7 83 +Manufacturer#4 almond azure aquamarine papaya violet 5 5 12 95 +Manufacturer#5 almond antique 
blue firebrick mint 1 1 31 31 +Manufacturer#5 almond antique medium spring khaki 2 2 6 37 +Manufacturer#5 almond antique sky peru orange 3 3 2 39 +Manufacturer#5 almond aquamarine dodger light gainsboro 4 4 46 85 +Manufacturer#5 almond azure blanched chiffon midnight 5 5 23 108 +PREHOOK: query: -- 20. testMultiOperatorChainWithNoWindowing +select p_mfgr, p_name, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +p_size, sum(p_size) over (partition by p_mfgr order by p_name) as s1 +from noop(on + noop(on + noop(on + noop(on part + partition by p_mfgr,p_name + order by p_mfgr,p_name) + ) + partition by p_mfgr + order by p_mfgr)) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 20. testMultiOperatorChainWithNoWindowing +select p_mfgr, p_name, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +p_size, sum(p_size) over (partition by p_mfgr order by p_name) as s1 +from noop(on + noop(on + noop(on + noop(on part + partition by p_mfgr,p_name + order by p_mfgr,p_name) + ) + partition by p_mfgr + order by p_mfgr)) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: Lineage: part_4.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), 
(part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, 
type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, 
type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 +Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 +Manufacturer#1 almond antique chartreuse lavender yellow 3 2 34 38 +Manufacturer#1 almond antique salmon chartreuse burlywood 4 3 6 44 +Manufacturer#1 almond aquamarine burnished black steel 5 4 28 72 +Manufacturer#1 almond aquamarine pink moccasin thistle 6 5 42 114 +Manufacturer#2 almond antique violet chocolate turquoise 1 1 14 14 +Manufacturer#2 almond antique violet turquoise frosted 2 2 40 54 +Manufacturer#2 almond aquamarine midnight light salmon 3 3 2 56 +Manufacturer#2 almond aquamarine rose maroon antique 4 4 25 81 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 5 5 18 99 +Manufacturer#3 almond antique chartreuse khaki white 1 1 17 17 +Manufacturer#3 almond antique forest lavender goldenrod 2 2 14 31 +Manufacturer#3 almond antique metallic orange dim 3 3 19 50 +Manufacturer#3 almond antique misty red olive 4 4 1 51 +Manufacturer#3 almond antique olive coral navajo 5 5 45 96 +Manufacturer#4 almond antique gainsboro frosted violet 1 1 10 10 +Manufacturer#4 almond antique violet mint lemon 2 2 39 49 +Manufacturer#4 almond aquamarine floral ivory bisque 3 3 27 76 +Manufacturer#4 almond aquamarine yellow dodger mint 4 4 7 83 +Manufacturer#4 almond azure aquamarine papaya violet 5 5 12 95 +Manufacturer#5 almond antique blue firebrick mint 1 1 31 31 +Manufacturer#5 almond antique medium spring khaki 2 2 6 37 +Manufacturer#5 almond antique sky peru orange 3 3 2 39 +Manufacturer#5 almond aquamarine dodger light gainsboro 4 4 46 85 +Manufacturer#5 almond azure blanched chiffon midnight 5 5 23 108 +PREHOOK: query: -- 21. 
testMultiOperatorChainEndsWithNoopMap +select p_mfgr, p_name, +rank() over (partition by p_mfgr,p_name) as r, +dense_rank() over (partition by p_mfgr,p_name) as dr, +p_size, sum(p_size) over (partition by p_mfgr,p_name rows between unbounded preceding and current row) as s1 +from noopwithmap(on + noop(on + noop(on + noop(on part + partition by p_mfgr,p_name + order by p_mfgr,p_name) + ) + partition by p_mfgr + order by p_mfgr) + partition by p_mfgr,p_name + order by p_mfgr,p_name) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 21. testMultiOperatorChainEndsWithNoopMap +select p_mfgr, p_name, +rank() over (partition by p_mfgr,p_name) as r, +dense_rank() over (partition by p_mfgr,p_name) as dr, +p_size, sum(p_size) over (partition by p_mfgr,p_name rows between unbounded preceding and current row) as s1 +from noopwithmap(on + noop(on + noop(on + noop(on part + partition by p_mfgr,p_name + order by p_mfgr,p_name) + ) + partition by p_mfgr + order by p_mfgr) + partition by p_mfgr,p_name + order by p_mfgr,p_name) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: Lineage: part_4.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: 
part_4.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), 
(part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.r SCRIPT 
[(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +Manufacturer#1 almond antique burnished rose metallic 1 1 2 2 +Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 +Manufacturer#1 almond antique chartreuse lavender yellow 1 1 34 34 +Manufacturer#1 almond antique salmon chartreuse burlywood 1 1 6 6 +Manufacturer#1 almond aquamarine burnished black steel 1 1 28 28 +Manufacturer#1 almond aquamarine pink moccasin thistle 1 1 42 42 +Manufacturer#2 almond antique violet chocolate turquoise 1 1 14 14 +Manufacturer#2 almond antique violet turquoise frosted 1 1 40 40 +Manufacturer#2 almond aquamarine midnight light salmon 1 1 2 2 +Manufacturer#2 almond aquamarine rose maroon antique 1 1 25 25 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 1 1 18 18 +Manufacturer#3 almond antique chartreuse khaki white 1 1 17 17 +Manufacturer#3 almond antique forest lavender goldenrod 1 1 14 14 +Manufacturer#3 almond antique metallic orange dim 1 1 19 19 +Manufacturer#3 almond antique misty red olive 1 1 1 1 +Manufacturer#3 almond antique olive coral navajo 1 1 45 45 +Manufacturer#4 almond antique gainsboro frosted violet 1 1 10 10 +Manufacturer#4 almond antique violet mint lemon 1 1 39 39 +Manufacturer#4 almond aquamarine floral ivory bisque 1 1 27 27 +Manufacturer#4 almond aquamarine yellow dodger mint 1 1 7 7 +Manufacturer#4 almond azure aquamarine papaya violet 1 1 12 12 +Manufacturer#5 almond antique blue firebrick mint 1 1 31 31 +Manufacturer#5 almond antique medium spring khaki 1 1 6 6 +Manufacturer#5 almond antique sky peru orange 1 1 2 2 +Manufacturer#5 almond aquamarine dodger light gainsboro 1 1 46 46 +Manufacturer#5 almond azure blanched chiffon midnight 1 1 23 23 +PREHOOK: query: -- 22. 
testMultiOperatorChainWithDiffPartitionForWindow1 +select p_mfgr, p_name, +rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as r, +dense_rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as dr, +p_size, +sum(p_size) over (partition by p_mfgr,p_name order by p_mfgr,p_name rows between unbounded preceding and current row) as s1, +sum(p_size) over (partition by p_mfgr,p_name order by p_mfgr,p_name rows between unbounded preceding and current row) as s2 +from noop(on + noopwithmap(on + noop(on part + partition by p_mfgr, p_name + order by p_mfgr, p_name) + partition by p_mfgr + order by p_mfgr + )) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 22. testMultiOperatorChainWithDiffPartitionForWindow1 +select p_mfgr, p_name, +rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as r, +dense_rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as dr, +p_size, +sum(p_size) over (partition by p_mfgr,p_name order by p_mfgr,p_name rows between unbounded preceding and current row) as s1, +sum(p_size) over (partition by p_mfgr,p_name order by p_mfgr,p_name rows between unbounded preceding and current row) as s2 +from noop(on + noopwithmap(on + noop(on part + partition by p_mfgr, p_name + order by p_mfgr, p_name) + partition by p_mfgr + order by p_mfgr + )) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: Lineage: part_4.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), 
(part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), 
(part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), 
(part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +Manufacturer#1 almond antique burnished rose metallic 1 1 2 2 2 +Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 4 +Manufacturer#1 almond antique chartreuse lavender yellow 1 1 34 34 34 +Manufacturer#1 almond antique salmon chartreuse burlywood 1 1 6 6 6 +Manufacturer#1 almond aquamarine burnished black steel 1 1 28 28 28 +Manufacturer#1 almond aquamarine pink moccasin thistle 1 1 42 42 42 +Manufacturer#2 almond antique violet chocolate turquoise 1 1 14 14 14 +Manufacturer#2 almond antique violet turquoise frosted 1 1 40 40 40 +Manufacturer#2 almond aquamarine midnight light salmon 1 1 2 2 2 +Manufacturer#2 almond aquamarine rose maroon antique 1 1 25 25 25 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 1 1 18 18 18 +Manufacturer#3 almond antique chartreuse khaki white 1 1 17 17 17 +Manufacturer#3 almond antique forest lavender goldenrod 1 1 14 14 14 +Manufacturer#3 almond antique metallic orange dim 1 1 19 19 19 +Manufacturer#3 almond antique misty red olive 1 1 1 1 1 +Manufacturer#3 almond antique olive coral navajo 1 1 45 45 45 +Manufacturer#4 almond antique gainsboro frosted violet 1 1 10 10 10 +Manufacturer#4 almond antique violet mint lemon 1 1 39 39 39 +Manufacturer#4 almond aquamarine floral ivory bisque 1 1 27 27 27 +Manufacturer#4 almond aquamarine yellow dodger mint 1 1 7 7 7 +Manufacturer#4 almond azure aquamarine papaya violet 1 1 12 12 12 +Manufacturer#5 almond antique blue firebrick mint 1 1 31 31 31 +Manufacturer#5 almond antique medium spring khaki 1 1 6 6 6 +Manufacturer#5 almond antique sky peru orange 1 1 2 2 2 +Manufacturer#5 almond aquamarine dodger light gainsboro 1 1 46 46 46 +Manufacturer#5 almond azure blanched chiffon 
midnight 1 1 23 23 23 +PREHOOK: query: -- 23. testMultiOperatorChainWithDiffPartitionForWindow2 +select p_mfgr, p_name, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +p_size, +sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row) as s1, +sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row) as s2 +from noopwithmap(on + noop(on + noop(on part + partition by p_mfgr, p_name + order by p_mfgr, p_name) + )) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 23. testMultiOperatorChainWithDiffPartitionForWindow2 +select p_mfgr, p_name, +rank() over (partition by p_mfgr order by p_name) as r, +dense_rank() over (partition by p_mfgr order by p_name) as dr, +p_size, +sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row) as s1, +sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row) as s2 +from noopwithmap(on + noop(on + noop(on part + partition by p_mfgr, p_name + order by p_mfgr, p_name) + )) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: Lineage: part_4.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), 
(part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, 
type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, 
type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_5.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 4 +Manufacturer#1 almond antique burnished rose metallic 1 1 2 4 4 +Manufacturer#1 almond antique chartreuse lavender yellow 3 2 34 38 38 +Manufacturer#1 almond antique salmon chartreuse burlywood 4 3 6 44 44 +Manufacturer#1 almond aquamarine burnished black steel 5 4 28 72 72 +Manufacturer#1 almond aquamarine pink moccasin thistle 6 5 42 114 114 +Manufacturer#2 almond antique violet chocolate turquoise 1 1 14 14 14 +Manufacturer#2 almond antique violet turquoise frosted 2 2 40 54 54 +Manufacturer#2 almond aquamarine midnight light salmon 3 3 2 56 56 +Manufacturer#2 almond aquamarine rose maroon antique 4 4 25 81 81 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 5 5 18 99 99 +Manufacturer#3 almond antique chartreuse khaki white 1 1 17 17 17 +Manufacturer#3 almond antique forest lavender goldenrod 2 2 14 31 31 +Manufacturer#3 almond antique metallic orange dim 3 3 19 50 50 +Manufacturer#3 almond antique misty red olive 4 4 1 51 51 +Manufacturer#3 almond antique olive coral navajo 5 5 45 96 96 +Manufacturer#4 almond antique gainsboro frosted violet 1 1 10 10 10 +Manufacturer#4 almond antique violet mint lemon 2 2 39 49 49 +Manufacturer#4 almond aquamarine floral ivory bisque 3 3 27 76 76 +Manufacturer#4 almond aquamarine yellow dodger mint 4 4 7 83 83 +Manufacturer#4 almond azure aquamarine papaya violet 5 5 12 95 95 +Manufacturer#5 almond antique blue firebrick mint 1 1 31 31 31 +Manufacturer#5 almond antique medium spring khaki 2 2 6 37 37 +Manufacturer#5 almond antique sky peru orange 3 3 2 39 39 +Manufacturer#5 almond aquamarine dodger light gainsboro 4 4 46 85 85 +Manufacturer#5 almond azure blanched chiffon midnight 5 5 23 108 108 Index: ql/src/test/results/clientpositive/tez/sample1.q.out =================================================================== --- 
ql/src/test/results/clientpositive/tez/sample1.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/sample1.q.out (working copy) @@ -0,0 +1,853 @@ +PREHOOK: query: CREATE TABLE dest1(key INT, value STRING, dt STRING, hr STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING, dt STRING, hr STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@dest1 +PREHOOK: query: -- no input pruning, no sample filter +EXPLAIN EXTENDED +INSERT OVERWRITE TABLE dest1 SELECT s.* +FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s +WHERE s.ds='2008-04-08' and s.hr='11' +PREHOOK: type: QUERY +POSTHOOK: query: -- no input pruning, no sample filter +EXPLAIN EXTENDED +INSERT OVERWRITE TABLE dest1 SELECT s.* +FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s +WHERE s.ds='2008-04-08' and s.hr='11' +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart) (TOK_TABLEBUCKETSAMPLE 1 1 (TOK_FUNCTION rand)) s)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME s)))) (TOK_WHERE (and (= (. (TOK_TABLE_OR_COL s) ds) '2008-04-08') (= (. (TOK_TABLE_OR_COL s) hr) '11'))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + s + TableScan + alias: s + Statistics: + numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE + GatherStats: false + Filter Operator + isSamplingPred: true + predicate: + expr: (((hash(rand()) & 2147483647) % 1) = 0) + type: boolean + Statistics: + numRows: 14 dataSize: 2805 basicStatsState: COMPLETE colStatsState: NONE + Select Operator + expressions: + expr: UDFToInteger(key) + type: int + expr: value + type: string + expr: ds + type: string + expr: hr + type: string + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: + numRows: 14 dataSize: 2805 basicStatsState: COMPLETE colStatsState: NONE + File Output Operator + compressed: false + GlobalTableId: 1 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: + numRows: 14 dataSize: 2805 basicStatsState: COMPLETE colStatsState: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns key,value,dt,hr + columns.types int:string:string:string +#### A masked pattern was here #### + name default.dest1 + serialization.ddl struct dest1 { i32 key, string value, string dt, string hr} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + TotalFiles: 1 + GatherStats: true + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + hr 11 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 
-1 + columns key,value + columns.types string:string +#### A masked pattern was here #### + name default.srcpart + numFiles 1 + numRows 0 + partition_columns ds/hr + rawDataSize 0 + serialization.ddl struct srcpart { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns key,value + columns.types string:string +#### A masked pattern was here #### + name default.srcpart + partition_columns ds/hr + serialization.ddl struct srcpart { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.srcpart + name: default.srcpart + Truncated Path -> Alias: + /srcpart/ds=2008-04-08/hr=11 [s] + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns key,value,dt,hr + columns.types int:string:string:string +#### A masked pattern was here #### + name default.dest1 + serialization.ddl struct dest1 { i32 key, string value, string dt, string hr} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 +#### A masked pattern was here #### + + Stage: Stage-3 + Stats-Aggr Operator +#### A masked pattern was here #### + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + GatherStats: false + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns key,value,dt,hr + columns.types int:string:string:string +#### A masked pattern was here #### + name default.dest1 + serialization.ddl struct dest1 { i32 key, string value, string dt, string hr} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns key,value,dt,hr + columns.types int:string:string:string +#### A masked pattern was here #### + name default.dest1 + serialization.ddl struct dest1 { i32 key, string value, string dt, string hr} + serialization.format 1 + 
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns key,value,dt,hr + columns.types int:string:string:string +#### A masked pattern was here #### + name default.dest1 + serialization.ddl struct dest1 { i32 key, string value, string dt, string hr} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + name: default.dest1 + Truncated Path -> Alias: +#### A masked pattern was here #### + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + GatherStats: false + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns key,value,dt,hr + columns.types int:string:string:string +#### A masked pattern was here #### + name default.dest1 + serialization.ddl struct dest1 { i32 key, string value, string dt, string hr} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns key,value,dt,hr + columns.types int:string:string:string +#### A masked pattern was here #### + name default.dest1 + serialization.ddl struct dest1 { i32 key, string value, string dt, string hr} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns key,value,dt,hr + columns.types int:string:string:string +#### A masked pattern was here #### + name default.dest1 + serialization.ddl struct dest1 { i32 key, string value, string dt, string hr} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + name: default.dest1 + Truncated Path -> Alias: +#### A masked pattern was here #### + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.* +FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s +WHERE s.ds='2008-04-08' and s.hr='11' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Output: default@dest1 +POSTHOOK: query: INSERT OVERWRITE TABLE dest1 
SELECT s.* +FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s +WHERE s.ds='2008-04-08' and s.hr='11' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@dest1 +POSTHOOK: Lineage: dest1.dt SIMPLE [(srcpart)s.FieldSchema(name:ds, type:string, comment:null), ] +POSTHOOK: Lineage: dest1.hr SIMPLE [(srcpart)s.FieldSchema(name:hr, type:string, comment:null), ] +POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)s.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.value SIMPLE [(srcpart)s.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1.* FROM dest1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest1 +#### A masked pattern was here #### +POSTHOOK: Lineage: dest1.dt SIMPLE [(srcpart)s.FieldSchema(name:ds, type:string, comment:null), ] +POSTHOOK: Lineage: dest1.hr SIMPLE [(srcpart)s.FieldSchema(name:hr, type:string, comment:null), ] +POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)s.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.value SIMPLE [(srcpart)s.FieldSchema(name:value, type:string, comment:default), ] +238 val_238 2008-04-08 11 +86 val_86 2008-04-08 11 +311 val_311 2008-04-08 11 +27 val_27 2008-04-08 11 +165 val_165 2008-04-08 11 +409 val_409 2008-04-08 11 +255 val_255 2008-04-08 11 +278 val_278 2008-04-08 11 +98 val_98 2008-04-08 11 +484 val_484 2008-04-08 11 +265 val_265 2008-04-08 11 +193 val_193 2008-04-08 11 +401 val_401 2008-04-08 11 +150 val_150 2008-04-08 11 +273 val_273 2008-04-08 11 +224 val_224 2008-04-08 11 +369 val_369 2008-04-08 11 +66 val_66 2008-04-08 11 +128 val_128 2008-04-08 11 +213 val_213 2008-04-08 11 +146 val_146 2008-04-08 11 +406 val_406 2008-04-08 11 +429 val_429 2008-04-08 11 +374 val_374 2008-04-08 11 +152 val_152 2008-04-08 11 +469 val_469 2008-04-08 11 +145 val_145 2008-04-08 11 +495 val_495 2008-04-08 11 +37 val_37 2008-04-08 11 +327 val_327 2008-04-08 11 +281 val_281 2008-04-08 11 +277 val_277 2008-04-08 11 +209 val_209 2008-04-08 11 +15 val_15 2008-04-08 11 +82 val_82 2008-04-08 11 +403 val_403 2008-04-08 11 +166 val_166 2008-04-08 11 +417 val_417 2008-04-08 11 +430 val_430 2008-04-08 11 +252 val_252 2008-04-08 11 +292 val_292 2008-04-08 11 +219 val_219 2008-04-08 11 +287 val_287 2008-04-08 11 +153 val_153 2008-04-08 11 +193 val_193 2008-04-08 11 +338 val_338 2008-04-08 11 +446 val_446 2008-04-08 11 +459 val_459 2008-04-08 11 +394 val_394 2008-04-08 11 +237 val_237 2008-04-08 11 +482 val_482 2008-04-08 11 +174 val_174 2008-04-08 11 +413 val_413 2008-04-08 11 +494 val_494 2008-04-08 11 +207 val_207 2008-04-08 11 +199 val_199 2008-04-08 11 +466 val_466 2008-04-08 11 +208 val_208 2008-04-08 11 +174 val_174 2008-04-08 11 +399 val_399 2008-04-08 11 +396 val_396 2008-04-08 11 +247 val_247 2008-04-08 11 +417 val_417 2008-04-08 11 +489 val_489 2008-04-08 11 +162 val_162 2008-04-08 11 +377 val_377 2008-04-08 11 +397 val_397 2008-04-08 11 +309 val_309 2008-04-08 11 +365 val_365 2008-04-08 11 +266 val_266 2008-04-08 11 +439 val_439 2008-04-08 11 +342 val_342 2008-04-08 11 +367 val_367 2008-04-08 11 +325 val_325 2008-04-08 11 +167 val_167 2008-04-08 11 +195 val_195 2008-04-08 11 +475 val_475 2008-04-08 11 +17 val_17 2008-04-08 11 +113 val_113 2008-04-08 11 +155 val_155 2008-04-08 11 +203 val_203 2008-04-08 11 +339 val_339 2008-04-08 11 +0 
val_0 2008-04-08 11 +455 val_455 2008-04-08 11 +128 val_128 2008-04-08 11 +311 val_311 2008-04-08 11 +316 val_316 2008-04-08 11 +57 val_57 2008-04-08 11 +302 val_302 2008-04-08 11 +205 val_205 2008-04-08 11 +149 val_149 2008-04-08 11 +438 val_438 2008-04-08 11 +345 val_345 2008-04-08 11 +129 val_129 2008-04-08 11 +170 val_170 2008-04-08 11 +20 val_20 2008-04-08 11 +489 val_489 2008-04-08 11 +157 val_157 2008-04-08 11 +378 val_378 2008-04-08 11 +221 val_221 2008-04-08 11 +92 val_92 2008-04-08 11 +111 val_111 2008-04-08 11 +47 val_47 2008-04-08 11 +72 val_72 2008-04-08 11 +4 val_4 2008-04-08 11 +280 val_280 2008-04-08 11 +35 val_35 2008-04-08 11 +427 val_427 2008-04-08 11 +277 val_277 2008-04-08 11 +208 val_208 2008-04-08 11 +356 val_356 2008-04-08 11 +399 val_399 2008-04-08 11 +169 val_169 2008-04-08 11 +382 val_382 2008-04-08 11 +498 val_498 2008-04-08 11 +125 val_125 2008-04-08 11 +386 val_386 2008-04-08 11 +437 val_437 2008-04-08 11 +469 val_469 2008-04-08 11 +192 val_192 2008-04-08 11 +286 val_286 2008-04-08 11 +187 val_187 2008-04-08 11 +176 val_176 2008-04-08 11 +54 val_54 2008-04-08 11 +459 val_459 2008-04-08 11 +51 val_51 2008-04-08 11 +138 val_138 2008-04-08 11 +103 val_103 2008-04-08 11 +239 val_239 2008-04-08 11 +213 val_213 2008-04-08 11 +216 val_216 2008-04-08 11 +430 val_430 2008-04-08 11 +278 val_278 2008-04-08 11 +176 val_176 2008-04-08 11 +289 val_289 2008-04-08 11 +221 val_221 2008-04-08 11 +65 val_65 2008-04-08 11 +318 val_318 2008-04-08 11 +332 val_332 2008-04-08 11 +311 val_311 2008-04-08 11 +275 val_275 2008-04-08 11 +137 val_137 2008-04-08 11 +241 val_241 2008-04-08 11 +83 val_83 2008-04-08 11 +333 val_333 2008-04-08 11 +180 val_180 2008-04-08 11 +284 val_284 2008-04-08 11 +12 val_12 2008-04-08 11 +230 val_230 2008-04-08 11 +181 val_181 2008-04-08 11 +67 val_67 2008-04-08 11 +260 val_260 2008-04-08 11 +404 val_404 2008-04-08 11 +384 val_384 2008-04-08 11 +489 val_489 2008-04-08 11 +353 val_353 2008-04-08 11 +373 val_373 2008-04-08 11 +272 val_272 2008-04-08 11 +138 val_138 2008-04-08 11 +217 val_217 2008-04-08 11 +84 val_84 2008-04-08 11 +348 val_348 2008-04-08 11 +466 val_466 2008-04-08 11 +58 val_58 2008-04-08 11 +8 val_8 2008-04-08 11 +411 val_411 2008-04-08 11 +230 val_230 2008-04-08 11 +208 val_208 2008-04-08 11 +348 val_348 2008-04-08 11 +24 val_24 2008-04-08 11 +463 val_463 2008-04-08 11 +431 val_431 2008-04-08 11 +179 val_179 2008-04-08 11 +172 val_172 2008-04-08 11 +42 val_42 2008-04-08 11 +129 val_129 2008-04-08 11 +158 val_158 2008-04-08 11 +119 val_119 2008-04-08 11 +496 val_496 2008-04-08 11 +0 val_0 2008-04-08 11 +322 val_322 2008-04-08 11 +197 val_197 2008-04-08 11 +468 val_468 2008-04-08 11 +393 val_393 2008-04-08 11 +454 val_454 2008-04-08 11 +100 val_100 2008-04-08 11 +298 val_298 2008-04-08 11 +199 val_199 2008-04-08 11 +191 val_191 2008-04-08 11 +418 val_418 2008-04-08 11 +96 val_96 2008-04-08 11 +26 val_26 2008-04-08 11 +165 val_165 2008-04-08 11 +327 val_327 2008-04-08 11 +230 val_230 2008-04-08 11 +205 val_205 2008-04-08 11 +120 val_120 2008-04-08 11 +131 val_131 2008-04-08 11 +51 val_51 2008-04-08 11 +404 val_404 2008-04-08 11 +43 val_43 2008-04-08 11 +436 val_436 2008-04-08 11 +156 val_156 2008-04-08 11 +469 val_469 2008-04-08 11 +468 val_468 2008-04-08 11 +308 val_308 2008-04-08 11 +95 val_95 2008-04-08 11 +196 val_196 2008-04-08 11 +288 val_288 2008-04-08 11 +481 val_481 2008-04-08 11 +457 val_457 2008-04-08 11 +98 val_98 2008-04-08 11 +282 val_282 2008-04-08 11 +197 val_197 2008-04-08 11 +187 val_187 2008-04-08 11 +318 val_318 2008-04-08 11 
+318 val_318 2008-04-08 11 +409 val_409 2008-04-08 11 +470 val_470 2008-04-08 11 +137 val_137 2008-04-08 11 +369 val_369 2008-04-08 11 +316 val_316 2008-04-08 11 +169 val_169 2008-04-08 11 +413 val_413 2008-04-08 11 +85 val_85 2008-04-08 11 +77 val_77 2008-04-08 11 +0 val_0 2008-04-08 11 +490 val_490 2008-04-08 11 +87 val_87 2008-04-08 11 +364 val_364 2008-04-08 11 +179 val_179 2008-04-08 11 +118 val_118 2008-04-08 11 +134 val_134 2008-04-08 11 +395 val_395 2008-04-08 11 +282 val_282 2008-04-08 11 +138 val_138 2008-04-08 11 +238 val_238 2008-04-08 11 +419 val_419 2008-04-08 11 +15 val_15 2008-04-08 11 +118 val_118 2008-04-08 11 +72 val_72 2008-04-08 11 +90 val_90 2008-04-08 11 +307 val_307 2008-04-08 11 +19 val_19 2008-04-08 11 +435 val_435 2008-04-08 11 +10 val_10 2008-04-08 11 +277 val_277 2008-04-08 11 +273 val_273 2008-04-08 11 +306 val_306 2008-04-08 11 +224 val_224 2008-04-08 11 +309 val_309 2008-04-08 11 +389 val_389 2008-04-08 11 +327 val_327 2008-04-08 11 +242 val_242 2008-04-08 11 +369 val_369 2008-04-08 11 +392 val_392 2008-04-08 11 +272 val_272 2008-04-08 11 +331 val_331 2008-04-08 11 +401 val_401 2008-04-08 11 +242 val_242 2008-04-08 11 +452 val_452 2008-04-08 11 +177 val_177 2008-04-08 11 +226 val_226 2008-04-08 11 +5 val_5 2008-04-08 11 +497 val_497 2008-04-08 11 +402 val_402 2008-04-08 11 +396 val_396 2008-04-08 11 +317 val_317 2008-04-08 11 +395 val_395 2008-04-08 11 +58 val_58 2008-04-08 11 +35 val_35 2008-04-08 11 +336 val_336 2008-04-08 11 +95 val_95 2008-04-08 11 +11 val_11 2008-04-08 11 +168 val_168 2008-04-08 11 +34 val_34 2008-04-08 11 +229 val_229 2008-04-08 11 +233 val_233 2008-04-08 11 +143 val_143 2008-04-08 11 +472 val_472 2008-04-08 11 +322 val_322 2008-04-08 11 +498 val_498 2008-04-08 11 +160 val_160 2008-04-08 11 +195 val_195 2008-04-08 11 +42 val_42 2008-04-08 11 +321 val_321 2008-04-08 11 +430 val_430 2008-04-08 11 +119 val_119 2008-04-08 11 +489 val_489 2008-04-08 11 +458 val_458 2008-04-08 11 +78 val_78 2008-04-08 11 +76 val_76 2008-04-08 11 +41 val_41 2008-04-08 11 +223 val_223 2008-04-08 11 +492 val_492 2008-04-08 11 +149 val_149 2008-04-08 11 +449 val_449 2008-04-08 11 +218 val_218 2008-04-08 11 +228 val_228 2008-04-08 11 +138 val_138 2008-04-08 11 +453 val_453 2008-04-08 11 +30 val_30 2008-04-08 11 +209 val_209 2008-04-08 11 +64 val_64 2008-04-08 11 +468 val_468 2008-04-08 11 +76 val_76 2008-04-08 11 +74 val_74 2008-04-08 11 +342 val_342 2008-04-08 11 +69 val_69 2008-04-08 11 +230 val_230 2008-04-08 11 +33 val_33 2008-04-08 11 +368 val_368 2008-04-08 11 +103 val_103 2008-04-08 11 +296 val_296 2008-04-08 11 +113 val_113 2008-04-08 11 +216 val_216 2008-04-08 11 +367 val_367 2008-04-08 11 +344 val_344 2008-04-08 11 +167 val_167 2008-04-08 11 +274 val_274 2008-04-08 11 +219 val_219 2008-04-08 11 +239 val_239 2008-04-08 11 +485 val_485 2008-04-08 11 +116 val_116 2008-04-08 11 +223 val_223 2008-04-08 11 +256 val_256 2008-04-08 11 +263 val_263 2008-04-08 11 +70 val_70 2008-04-08 11 +487 val_487 2008-04-08 11 +480 val_480 2008-04-08 11 +401 val_401 2008-04-08 11 +288 val_288 2008-04-08 11 +191 val_191 2008-04-08 11 +5 val_5 2008-04-08 11 +244 val_244 2008-04-08 11 +438 val_438 2008-04-08 11 +128 val_128 2008-04-08 11 +467 val_467 2008-04-08 11 +432 val_432 2008-04-08 11 +202 val_202 2008-04-08 11 +316 val_316 2008-04-08 11 +229 val_229 2008-04-08 11 +469 val_469 2008-04-08 11 +463 val_463 2008-04-08 11 +280 val_280 2008-04-08 11 +2 val_2 2008-04-08 11 +35 val_35 2008-04-08 11 +283 val_283 2008-04-08 11 +331 val_331 2008-04-08 11 +235 val_235 2008-04-08 11 
+80 val_80 2008-04-08 11 +44 val_44 2008-04-08 11 +193 val_193 2008-04-08 11 +321 val_321 2008-04-08 11 +335 val_335 2008-04-08 11 +104 val_104 2008-04-08 11 +466 val_466 2008-04-08 11 +366 val_366 2008-04-08 11 +175 val_175 2008-04-08 11 +403 val_403 2008-04-08 11 +483 val_483 2008-04-08 11 +53 val_53 2008-04-08 11 +105 val_105 2008-04-08 11 +257 val_257 2008-04-08 11 +406 val_406 2008-04-08 11 +409 val_409 2008-04-08 11 +190 val_190 2008-04-08 11 +406 val_406 2008-04-08 11 +401 val_401 2008-04-08 11 +114 val_114 2008-04-08 11 +258 val_258 2008-04-08 11 +90 val_90 2008-04-08 11 +203 val_203 2008-04-08 11 +262 val_262 2008-04-08 11 +348 val_348 2008-04-08 11 +424 val_424 2008-04-08 11 +12 val_12 2008-04-08 11 +396 val_396 2008-04-08 11 +201 val_201 2008-04-08 11 +217 val_217 2008-04-08 11 +164 val_164 2008-04-08 11 +431 val_431 2008-04-08 11 +454 val_454 2008-04-08 11 +478 val_478 2008-04-08 11 +298 val_298 2008-04-08 11 +125 val_125 2008-04-08 11 +431 val_431 2008-04-08 11 +164 val_164 2008-04-08 11 +424 val_424 2008-04-08 11 +187 val_187 2008-04-08 11 +382 val_382 2008-04-08 11 +5 val_5 2008-04-08 11 +70 val_70 2008-04-08 11 +397 val_397 2008-04-08 11 +480 val_480 2008-04-08 11 +291 val_291 2008-04-08 11 +24 val_24 2008-04-08 11 +351 val_351 2008-04-08 11 +255 val_255 2008-04-08 11 +104 val_104 2008-04-08 11 +70 val_70 2008-04-08 11 +163 val_163 2008-04-08 11 +438 val_438 2008-04-08 11 +119 val_119 2008-04-08 11 +414 val_414 2008-04-08 11 +200 val_200 2008-04-08 11 +491 val_491 2008-04-08 11 +237 val_237 2008-04-08 11 +439 val_439 2008-04-08 11 +360 val_360 2008-04-08 11 +248 val_248 2008-04-08 11 +479 val_479 2008-04-08 11 +305 val_305 2008-04-08 11 +417 val_417 2008-04-08 11 +199 val_199 2008-04-08 11 +444 val_444 2008-04-08 11 +120 val_120 2008-04-08 11 +429 val_429 2008-04-08 11 +169 val_169 2008-04-08 11 +443 val_443 2008-04-08 11 +323 val_323 2008-04-08 11 +325 val_325 2008-04-08 11 +277 val_277 2008-04-08 11 +230 val_230 2008-04-08 11 +478 val_478 2008-04-08 11 +178 val_178 2008-04-08 11 +468 val_468 2008-04-08 11 +310 val_310 2008-04-08 11 +317 val_317 2008-04-08 11 +333 val_333 2008-04-08 11 +493 val_493 2008-04-08 11 +460 val_460 2008-04-08 11 +207 val_207 2008-04-08 11 +249 val_249 2008-04-08 11 +265 val_265 2008-04-08 11 +480 val_480 2008-04-08 11 +83 val_83 2008-04-08 11 +136 val_136 2008-04-08 11 +353 val_353 2008-04-08 11 +172 val_172 2008-04-08 11 +214 val_214 2008-04-08 11 +462 val_462 2008-04-08 11 +233 val_233 2008-04-08 11 +406 val_406 2008-04-08 11 +133 val_133 2008-04-08 11 +175 val_175 2008-04-08 11 +189 val_189 2008-04-08 11 +454 val_454 2008-04-08 11 +375 val_375 2008-04-08 11 +401 val_401 2008-04-08 11 +421 val_421 2008-04-08 11 +407 val_407 2008-04-08 11 +384 val_384 2008-04-08 11 +256 val_256 2008-04-08 11 +26 val_26 2008-04-08 11 +134 val_134 2008-04-08 11 +67 val_67 2008-04-08 11 +384 val_384 2008-04-08 11 +379 val_379 2008-04-08 11 +18 val_18 2008-04-08 11 +462 val_462 2008-04-08 11 +492 val_492 2008-04-08 11 +100 val_100 2008-04-08 11 +298 val_298 2008-04-08 11 +9 val_9 2008-04-08 11 +341 val_341 2008-04-08 11 +498 val_498 2008-04-08 11 +146 val_146 2008-04-08 11 +458 val_458 2008-04-08 11 +362 val_362 2008-04-08 11 +186 val_186 2008-04-08 11 +285 val_285 2008-04-08 11 +348 val_348 2008-04-08 11 +167 val_167 2008-04-08 11 +18 val_18 2008-04-08 11 +273 val_273 2008-04-08 11 +183 val_183 2008-04-08 11 +281 val_281 2008-04-08 11 +344 val_344 2008-04-08 11 +97 val_97 2008-04-08 11 +469 val_469 2008-04-08 11 +315 val_315 2008-04-08 11 +84 val_84 2008-04-08 11 
+28 val_28 2008-04-08 11 +37 val_37 2008-04-08 11 +448 val_448 2008-04-08 11 +152 val_152 2008-04-08 11 +348 val_348 2008-04-08 11 +307 val_307 2008-04-08 11 +194 val_194 2008-04-08 11 +414 val_414 2008-04-08 11 +477 val_477 2008-04-08 11 +222 val_222 2008-04-08 11 +126 val_126 2008-04-08 11 +90 val_90 2008-04-08 11 +169 val_169 2008-04-08 11 +403 val_403 2008-04-08 11 +400 val_400 2008-04-08 11 +200 val_200 2008-04-08 11 +97 val_97 2008-04-08 11 +PREHOOK: query: select count(1) from srcbucket +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from srcbucket +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket +#### A masked pattern was here #### +POSTHOOK: Lineage: dest1.dt SIMPLE [(srcpart)s.FieldSchema(name:ds, type:string, comment:null), ] +POSTHOOK: Lineage: dest1.hr SIMPLE [(srcpart)s.FieldSchema(name:hr, type:string, comment:null), ] +POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)s.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.value SIMPLE [(srcpart)s.FieldSchema(name:value, type:string, comment:default), ] +1000 Index: ql/src/test/results/clientpositive/tez/subquery_exists.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/subquery_exists.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/subquery_exists.q.out (working copy) @@ -0,0 +1,227 @@ +PREHOOK: query: -- no agg, corr +explain +select * +from src b +where exists + (select a.key + from src a + where b.value = a.value and a.key = b.key and a.value > 'val_9' + ) +PREHOOK: type: QUERY +POSTHOOK: query: -- no agg, corr +explain +select * +from src b +where exists + (select a.key + from src a + where b.value = a.value and a.key = b.key and a.value > 'val_9' + ) +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) b)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (TOK_SUBQUERY_EXPR (TOK_SUBQUERY_OP exists) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key))) (TOK_WHERE (and (and (= (. (TOK_TABLE_OR_COL b) value) (. (TOK_TABLE_OR_COL a) value)) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key))) (> (. 
(TOK_TABLE_OR_COL a) value) 'val_9'))))))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + b + TableScan + alias: b + Reduce Output Operator + key expressions: + expr: value + type: string + expr: key + type: string + sort order: ++ + Map-reduce partition columns: + expr: value + type: string + expr: key + type: string + tag: 0 + value expressions: + expr: key + type: string + expr: value + type: string + Alias -> Map Operator Tree: + a + TableScan + alias: a + Filter Operator + predicate: + expr: (value > 'val_9') + type: boolean + Select Operator + expressions: + expr: value + type: string + expr: key + type: string + outputColumnNames: _col1, _col2 + Group By Operator + bucketGroup: false + keys: + expr: _col1 + type: string + expr: _col2 + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + expr: _col1 + type: string + sort order: ++ + Map-reduce partition columns: + expr: _col0 + type: string + expr: _col1 + type: string + tag: 1 + Reduce Operator Tree: + Join Operator + condition map: + Left Semi Join 0 to 1 + condition expressions: + 0 {VALUE._col0} {VALUE._col1} + 1 + handleSkewJoin: false + outputColumnNames: _col0, _col1 + Filter Operator + predicate: + expr: (1 = 1) + type: boolean + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select * +from src b +where exists + (select a.key + from src a + where b.value = a.value and a.key = b.key and a.value > 'val_9' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select * +from src b +where exists + (select a.key + from src a + where b.value = a.value and a.key = b.key and a.value > 'val_9' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- view test +create view cv1 as +select * +from src b +where exists + (select a.key + from src a + where b.value = a.value and a.key = b.key and a.value > 'val_9') +PREHOOK: type: CREATEVIEW +POSTHOOK: query: -- view test +create view cv1 as +select * +from src b +where exists + (select a.key + from src a + where b.value = a.value and a.key = b.key and a.value > 'val_9') +POSTHOOK: type: CREATEVIEW +POSTHOOK: Output: default@cv1 +PREHOOK: query: select * from cv1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cv1 +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select * from cv1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cv1 +POSTHOOK: Input: default@src +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- sq in from +select * +from (select * + from src b + where exists + (select a.key + from src a + where b.value = a.value and a.key = b.key and a.value > 'val_9') + ) a +PREHOOK: type: QUERY +PREHOOK: Input: default@src 
+#### A masked pattern was here #### +POSTHOOK: query: -- sq in from +select * +from (select * + from src b + where exists + (select a.key + from src a + where b.value = a.value and a.key = b.key and a.value > 'val_9') + ) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 Index: ql/src/test/results/clientpositive/tez/subquery_in.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/subquery_in.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/subquery_in.q.out (working copy) @@ -0,0 +1,1171 @@ +PREHOOK: query: DROP TABLE part +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE part +POSTHOOK: type: DROPTABLE +PREHOOK: query: -- data setup +CREATE TABLE part( + p_partkey INT, + p_name STRING, + p_mfgr STRING, + p_brand STRING, + p_type STRING, + p_size INT, + p_container STRING, + p_retailprice DOUBLE, + p_comment STRING +) +PREHOOK: type: CREATETABLE +POSTHOOK: query: -- data setup +CREATE TABLE part( + p_partkey INT, + p_name STRING, + p_mfgr STRING, + p_brand STRING, + p_type STRING, + p_size INT, + p_container STRING, + p_retailprice DOUBLE, + p_comment STRING +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@part +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part +PREHOOK: type: LOAD +PREHOOK: Output: default@part +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part +POSTHOOK: type: LOAD +POSTHOOK: Output: default@part +PREHOOK: query: DROP TABLE lineitem +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE lineitem +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, + L_PARTKEY INT, + L_SUPPKEY INT, + L_LINENUMBER INT, + L_QUANTITY DOUBLE, + L_EXTENDEDPRICE DOUBLE, + L_DISCOUNT DOUBLE, + L_TAX DOUBLE, + L_RETURNFLAG STRING, + L_LINESTATUS STRING, + l_shipdate STRING, + L_COMMITDATE STRING, + L_RECEIPTDATE STRING, + L_SHIPINSTRUCT STRING, + L_SHIPMODE STRING, + L_COMMENT STRING) +ROW FORMAT DELIMITED +FIELDS TERMINATED BY '|' +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, + L_PARTKEY INT, + L_SUPPKEY INT, + L_LINENUMBER INT, + L_QUANTITY DOUBLE, + L_EXTENDEDPRICE DOUBLE, + L_DISCOUNT DOUBLE, + L_TAX DOUBLE, + L_RETURNFLAG STRING, + L_LINESTATUS STRING, + l_shipdate STRING, + L_COMMITDATE STRING, + L_RECEIPTDATE STRING, + L_SHIPINSTRUCT STRING, + L_SHIPMODE STRING, + L_COMMENT STRING) +ROW FORMAT DELIMITED +FIELDS TERMINATED BY '|' +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@lineitem +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem +PREHOOK: type: LOAD +PREHOOK: Output: default@lineitem +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem +POSTHOOK: type: LOAD +POSTHOOK: Output: default@lineitem +PREHOOK: query: -- non agg, non corr +explain + select * +from src +where src.key in (select key from src s1 where s1.key > '9') +PREHOOK: type: QUERY +POSTHOOK: query: -- non agg, non corr +explain + select * +from src +where src.key in (select key from src s1 where s1.key > '9') +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) 
(TOK_WHERE (TOK_SUBQUERY_EXPR (TOK_SUBQUERY_OP in) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) s1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key))) (TOK_WHERE (> (. (TOK_TABLE_OR_COL s1) key) '9')))) (. (TOK_TABLE_OR_COL src) key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + s1 + TableScan + alias: s1 + Filter Operator + predicate: + expr: (key > '9') + type: boolean + Select Operator + expressions: + expr: key + type: string + outputColumnNames: _col0 + Group By Operator + bucketGroup: false + keys: + expr: _col0 + type: string + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: 1 + Alias -> Map Operator Tree: + src + TableScan + alias: src + Reduce Output Operator + key expressions: + expr: key + type: string + sort order: + + Map-reduce partition columns: + expr: key + type: string + tag: 0 + value expressions: + expr: key + type: string + expr: value + type: string + Reduce Operator Tree: + Join Operator + condition map: + Left Semi Join 0 to 1 + condition expressions: + 0 {VALUE._col0} {VALUE._col1} + 1 + handleSkewJoin: false + outputColumnNames: _col0, _col1 + Filter Operator + predicate: + expr: (1 = 1) + type: boolean + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select * +from src +where src.key in (select key from src s1 where s1.key > '9') +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select * +from src +where src.key in (select key from src s1 where s1.key > '9') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- non agg, corr +explain +select * +from src b +where b.key in + (select a.key + from src a + where b.value = a.value and a.key > '9' + ) +PREHOOK: type: QUERY +POSTHOOK: query: -- non agg, corr +explain +select * +from src b +where b.key in + (select a.key + from src a + where b.value = a.value and a.key > '9' + ) +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) b)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (TOK_SUBQUERY_EXPR (TOK_SUBQUERY_OP in) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key))) (TOK_WHERE (and (= (. (TOK_TABLE_OR_COL b) value) (. (TOK_TABLE_OR_COL a) value)) (> (. (TOK_TABLE_OR_COL a) key) '9'))))) (. 
(TOK_TABLE_OR_COL b) key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + b + TableScan + alias: b + Reduce Output Operator + key expressions: + expr: key + type: string + expr: value + type: string + sort order: ++ + Map-reduce partition columns: + expr: key + type: string + expr: value + type: string + tag: 0 + value expressions: + expr: key + type: string + expr: value + type: string + Alias -> Map Operator Tree: + a + TableScan + alias: a + Filter Operator + predicate: + expr: (key > '9') + type: boolean + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: _col0, _col1 + Group By Operator + bucketGroup: false + keys: + expr: _col0 + type: string + expr: _col1 + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + expr: _col1 + type: string + sort order: ++ + Map-reduce partition columns: + expr: _col0 + type: string + expr: _col1 + type: string + tag: 1 + Reduce Operator Tree: + Join Operator + condition map: + Left Semi Join 0 to 1 + condition expressions: + 0 {VALUE._col0} {VALUE._col1} + 1 + handleSkewJoin: false + outputColumnNames: _col0, _col1 + Filter Operator + predicate: + expr: (1 = 1) + type: boolean + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select * +from src b +where b.key in + (select a.key + from src a + where b.value = a.value and a.key > '9' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select * +from src b +where b.key in + (select a.key + from src a + where b.value = a.value and a.key > '9' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- agg, non corr +explain +select p_name, p_size +from +part where part.p_size in + (select avg(p_size) + from (select p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a + where r <= 2 + ) +PREHOOK: type: QUERY +POSTHOOK: query: -- agg, non corr +explain +select p_name, p_size +from +part where part.p_size in + (select avg(p_size) + from (select p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a + where r <= 2 + ) +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME part))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL p_name)) (TOK_SELEXPR (TOK_TABLE_OR_COL p_size))) (TOK_WHERE (TOK_SUBQUERY_EXPR (TOK_SUBQUERY_OP in) (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME part))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL p_size)) (TOK_SELEXPR (TOK_FUNCTION rank (TOK_WINDOWSPEC (TOK_PARTITIONINGSPEC (TOK_DISTRIBUTEBY (TOK_TABLE_OR_COL p_mfgr)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL p_size)))))) r)))) a)) (TOK_INSERT 
(TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION avg (TOK_TABLE_OR_COL p_size)))) (TOK_WHERE (<= (TOK_TABLE_OR_COL r) 2)))) (. (TOK_TABLE_OR_COL part) p_size))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + part + TableScan + alias: part + Reduce Output Operator + key expressions: + expr: p_mfgr + type: string + expr: p_size + type: int + sort order: ++ + Map-reduce partition columns: + expr: p_mfgr + type: string + tag: -1 + value expressions: + expr: p_mfgr + type: string + expr: p_size + type: int + Reduce Operator Tree: + Extract + PTF Operator + Filter Operator + predicate: + expr: (_wcol0 <= 2) + type: boolean + Select Operator + expressions: + expr: _col5 + type: int + outputColumnNames: _col0 + Group By Operator + aggregations: + expr: avg(_col0) + bucketGroup: false + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + tag: -1 + value expressions: + expr: _col0 + type: struct + Reduce Operator Tree: + Group By Operator + aggregations: + expr: avg(VALUE._col0) + bucketGroup: false + mode: mergepartial + outputColumnNames: _col0 + Select Operator + expressions: + expr: _col0 + type: double + outputColumnNames: _col0 + Group By Operator + bucketGroup: false + keys: + expr: _col0 + type: double + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: + expr: _col0 + type: double + sort order: + + Map-reduce partition columns: + expr: _col0 + type: double + tag: 1 + Alias -> Map Operator Tree: + part + TableScan + alias: part + Reduce Output Operator + key expressions: + expr: UDFToDouble(p_size) + type: double + sort order: + + Map-reduce partition columns: + expr: UDFToDouble(p_size) + type: double + tag: 0 + value expressions: + expr: p_name + type: string + expr: p_size + type: int + Reduce Operator Tree: + Join Operator + condition map: + Left Semi Join 0 to 1 + condition expressions: + 0 {VALUE._col1} {VALUE._col5} + 1 + handleSkewJoin: false + outputColumnNames: _col1, _col5 + Filter Operator + predicate: + expr: (1 = 1) + type: boolean + Select Operator + expressions: + expr: _col1 + type: string + expr: _col5 + type: int + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select p_name, p_size +from +part where part.p_size in + (select avg(p_size) + from (select p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a + where r <= 2 + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: select p_name, p_size +from +part where part.p_size in + (select avg(p_size) + from (select p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a + where r <= 2 + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +almond antique medium spring khaki 6 +almond antique salmon chartreuse burlywood 6 +PREHOOK: query: -- agg, corr +explain +select p_mfgr, p_name, p_size +from part b where b.p_size in + (select min(p_size) + from (select p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a + where r <= 2 and b.p_mfgr = a.p_mfgr + ) +PREHOOK: 
type: QUERY +POSTHOOK: query: -- agg, corr +explain +select p_mfgr, p_name, p_size +from part b where b.p_size in + (select min(p_size) + from (select p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a + where r <= 2 and b.p_mfgr = a.p_mfgr + ) +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME part) b)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL p_mfgr)) (TOK_SELEXPR (TOK_TABLE_OR_COL p_name)) (TOK_SELEXPR (TOK_TABLE_OR_COL p_size))) (TOK_WHERE (TOK_SUBQUERY_EXPR (TOK_SUBQUERY_OP in) (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME part))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL p_mfgr)) (TOK_SELEXPR (TOK_TABLE_OR_COL p_size)) (TOK_SELEXPR (TOK_FUNCTION rank (TOK_WINDOWSPEC (TOK_PARTITIONINGSPEC (TOK_DISTRIBUTEBY (TOK_TABLE_OR_COL p_mfgr)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL p_size)))))) r)))) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION min (TOK_TABLE_OR_COL p_size)))) (TOK_WHERE (and (<= (TOK_TABLE_OR_COL r) 2) (= (. (TOK_TABLE_OR_COL b) p_mfgr) (. (TOK_TABLE_OR_COL a) p_mfgr)))))) (. (TOK_TABLE_OR_COL b) p_size))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + b + TableScan + alias: b + Reduce Output Operator + key expressions: + expr: p_size + type: int + expr: p_mfgr + type: string + sort order: ++ + Map-reduce partition columns: + expr: p_size + type: int + expr: p_mfgr + type: string + tag: 0 + value expressions: + expr: p_name + type: string + expr: p_mfgr + type: string + expr: p_size + type: int + Alias -> Map Operator Tree: + part + TableScan + alias: part + Reduce Output Operator + key expressions: + expr: p_mfgr + type: string + expr: p_size + type: int + sort order: ++ + Map-reduce partition columns: + expr: p_mfgr + type: string + tag: -1 + value expressions: + expr: p_mfgr + type: string + expr: p_size + type: int + Reduce Operator Tree: + Extract + PTF Operator + Filter Operator + predicate: + expr: (_wcol0 <= 2) + type: boolean + Select Operator + expressions: + expr: _col2 + type: string + expr: _col5 + type: int + outputColumnNames: _col0, _col1 + Group By Operator + aggregations: + expr: min(_col1) + bucketGroup: false + keys: + expr: _col0 + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col1 + type: int + Reduce Operator Tree: + Group By Operator + aggregations: + expr: min(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: _col1 + type: int + expr: _col0 + type: string + outputColumnNames: _col0, _col1 + Group By Operator + bucketGroup: false + keys: + expr: _col0 + type: int + expr: _col1 + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: int + expr: _col1 + type: string + sort order: ++ + Map-reduce partition columns: + expr: _col0 + type: int + expr: _col1 + type: string + tag: 1 + Reduce Operator Tree: + Join Operator + condition map: + Left Semi Join 0 to 1 + condition expressions: + 0 
{VALUE._col1} {VALUE._col2} {VALUE._col5} + 1 + handleSkewJoin: false + outputColumnNames: _col1, _col2, _col5 + Filter Operator + predicate: + expr: (1 = 1) + type: boolean + Select Operator + expressions: + expr: _col2 + type: string + expr: _col1 + type: string + expr: _col5 + type: int + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select p_mfgr, p_name, p_size +from part b where b.p_size in + (select min(p_size) + from (select p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a + where r <= 2 and b.p_mfgr = a.p_mfgr + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: select p_mfgr, p_name, p_size +from part b where b.p_size in + (select min(p_size) + from (select p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a + where r <= 2 and b.p_mfgr = a.p_mfgr + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#3 almond antique misty red olive 1 +Manufacturer#1 almond antique burnished rose metallic 2 +Manufacturer#1 almond antique burnished rose metallic 2 +Manufacturer#2 almond aquamarine midnight light salmon 2 +Manufacturer#5 almond antique sky peru orange 2 +Manufacturer#4 almond aquamarine yellow dodger mint 7 +PREHOOK: query: -- distinct, corr +explain +select * +from src b +where b.key in + (select distinct a.key + from src a + where b.value = a.value and a.key > '9' + ) +PREHOOK: type: QUERY +POSTHOOK: query: -- distinct, corr +explain +select * +from src b +where b.key in + (select distinct a.key + from src a + where b.value = a.value and a.key > '9' + ) +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) b)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (TOK_SUBQUERY_EXPR (TOK_SUBQUERY_OP in) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECTDI (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key))) (TOK_WHERE (and (= (. (TOK_TABLE_OR_COL b) value) (. (TOK_TABLE_OR_COL a) value)) (> (. (TOK_TABLE_OR_COL a) key) '9'))))) (. 
(TOK_TABLE_OR_COL b) key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + b + TableScan + alias: b + Reduce Output Operator + key expressions: + expr: key + type: string + expr: value + type: string + sort order: ++ + Map-reduce partition columns: + expr: key + type: string + expr: value + type: string + tag: 0 + value expressions: + expr: key + type: string + expr: value + type: string + Alias -> Map Operator Tree: + a + TableScan + alias: a + Filter Operator + predicate: + expr: (key > '9') + type: boolean + Select Operator + expressions: + expr: key + type: string + expr: value + type: string + outputColumnNames: key, value + Group By Operator + bucketGroup: false + keys: + expr: key + type: string + expr: value + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + expr: _col1 + type: string + sort order: ++ + Map-reduce partition columns: + expr: _col0 + type: string + expr: _col1 + type: string + tag: -1 + Reduce Operator Tree: + Group By Operator + bucketGroup: false + keys: + expr: KEY._col0 + type: string + expr: KEY._col1 + type: string + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: string + outputColumnNames: _col0, _col1 + Group By Operator + bucketGroup: false + keys: + expr: _col0 + type: string + expr: _col1 + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + expr: _col1 + type: string + sort order: ++ + Map-reduce partition columns: + expr: _col0 + type: string + expr: _col1 + type: string + tag: 1 + Reduce Operator Tree: + Join Operator + condition map: + Left Semi Join 0 to 1 + condition expressions: + 0 {VALUE._col0} {VALUE._col1} + 1 + handleSkewJoin: false + outputColumnNames: _col0, _col1 + Filter Operator + predicate: + expr: (1 = 1) + type: boolean + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select * +from src b +where b.key in + (select distinct a.key + from src a + where b.value = a.value and a.key > '9' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select * +from src b +where b.key in + (select distinct a.key + from src a + where b.value = a.value and a.key > '9' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- non agg, non corr, windowing +select p_mfgr, p_name, p_size +from part +where part.p_size in + (select first_value(p_size) over(partition by p_mfgr order by p_size) from part) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, non corr, windowing +select p_mfgr, p_name, p_size +from part +where part.p_size in + (select first_value(p_size) over(partition by p_mfgr order by p_size) from 
part) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#3 almond antique misty red olive 1 +Manufacturer#1 almond antique burnished rose metallic 2 +Manufacturer#5 almond antique sky peru orange 2 +Manufacturer#2 almond aquamarine midnight light salmon 2 +Manufacturer#1 almond antique burnished rose metallic 2 +Manufacturer#4 almond aquamarine yellow dodger mint 7 +PREHOOK: query: -- non agg, non corr, with join in Parent Query +explain +select p.p_partkey, li.l_suppkey +from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +where li.l_linenumber = 1 and + li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR') +PREHOOK: type: QUERY +POSTHOOK: query: -- non agg, non corr, with join in Parent Query +explain +select p.p_partkey, li.l_suppkey +from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +where li.l_linenumber = 1 and + li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR') +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME lineitem))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECTDI (TOK_SELEXPR (TOK_TABLE_OR_COL l_partkey) p_partkey)))) p) (TOK_TABREF (TOK_TABNAME lineitem) li) (= (. (TOK_TABLE_OR_COL p) p_partkey) (. (TOK_TABLE_OR_COL li) l_partkey)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL p) p_partkey)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL li) l_suppkey))) (TOK_WHERE (and (= (. (TOK_TABLE_OR_COL li) l_linenumber) 1) (TOK_SUBQUERY_EXPR (TOK_SUBQUERY_OP in) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME lineitem))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL l_orderkey))) (TOK_WHERE (= (TOK_TABLE_OR_COL l_shipmode) 'AIR')))) (. 
(TOK_TABLE_OR_COL li) l_orderkey)))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + li + TableScan + alias: li + Filter Operator + predicate: + expr: (l_linenumber = 1) + type: boolean + Reduce Output Operator + key expressions: + expr: l_partkey + type: int + sort order: + + Map-reduce partition columns: + expr: l_partkey + type: int + tag: 1 + value expressions: + expr: l_orderkey + type: int + expr: l_suppkey + type: int + Alias -> Map Operator Tree: + lineitem + TableScan + alias: lineitem + Select Operator + expressions: + expr: l_partkey + type: int + outputColumnNames: l_partkey + Group By Operator + bucketGroup: false + keys: + expr: l_partkey + type: int + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: + expr: _col0 + type: int + sort order: + + Map-reduce partition columns: + expr: _col0 + type: int + tag: -1 + Reduce Operator Tree: + Group By Operator + bucketGroup: false + keys: + expr: KEY._col0 + type: int + mode: mergepartial + outputColumnNames: _col0 + Select Operator + expressions: + expr: _col0 + type: int + outputColumnNames: _col0 + Reduce Output Operator + key expressions: + expr: _col0 + type: int + sort order: + + Map-reduce partition columns: + expr: _col0 + type: int + tag: 0 + value expressions: + expr: _col0 + type: int + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {VALUE._col0} + 1 {VALUE._col0} {VALUE._col2} + handleSkewJoin: false + outputColumnNames: _col0, _col1, _col3 + Reduce Output Operator + key expressions: + expr: _col1 + type: int + sort order: + + Map-reduce partition columns: + expr: _col1 + type: int + tag: 0 + value expressions: + expr: _col3 + type: int + expr: _col0 + type: int + Alias -> Map Operator Tree: + lineitem + TableScan + alias: lineitem + Filter Operator + predicate: + expr: (l_shipmode = 'AIR') + type: boolean + Select Operator + expressions: + expr: l_orderkey + type: int + outputColumnNames: _col0 + Group By Operator + bucketGroup: false + keys: + expr: _col0 + type: int + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: + expr: _col0 + type: int + sort order: + + Map-reduce partition columns: + expr: _col0 + type: int + tag: 1 + Reduce Operator Tree: + Join Operator + condition map: + Left Semi Join 0 to 1 + condition expressions: + 0 {VALUE._col2} {VALUE._col18} + 1 + handleSkewJoin: false + outputColumnNames: _col2, _col18 + Select Operator + expressions: + expr: _col18 + type: int + expr: _col2 + type: int + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select p.p_partkey, li.l_suppkey +from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +where li.l_linenumber = 1 and + li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR') +PREHOOK: type: QUERY +PREHOOK: Input: default@lineitem +#### A masked pattern was here #### +POSTHOOK: query: select p.p_partkey, li.l_suppkey +from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +where li.l_linenumber = 1 and + 
li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@lineitem +#### A masked pattern was here #### +155190 7706 +4297 1798 +108570 8571 +82704 7721 +61336 8855 +2320 9821 +115118 7630 +115209 7721 +64128 9141 +40216 217 +PREHOOK: query: -- non agg, corr, with join in Parent Query +select p.p_partkey, li.l_suppkey +from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +where li.l_linenumber = 1 and + li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) +PREHOOK: type: QUERY +PREHOOK: Input: default@lineitem +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, corr, with join in Parent Query +select p.p_partkey, li.l_suppkey +from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +where li.l_linenumber = 1 and + li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@lineitem +#### A masked pattern was here #### +4297 1798 +108570 8571 Index: ql/src/test/results/clientpositive/tez/tez_dml.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/tez_dml.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/tez_dml.q.out (working copy) @@ -0,0 +1,1787 @@ +PREHOOK: query: -- CTAS +EXPLAIN CREATE TABLE tmp_src AS SELECT * FROM (SELECT value, count(value) AS cnt FROM src GROUP BY value) f1 ORDER BY cnt +PREHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: query: -- CTAS +EXPLAIN CREATE TABLE tmp_src AS SELECT * FROM (SELECT value, count(value) AS cnt FROM src GROUP BY value) f1 ORDER BY cnt +POSTHOOK: type: CREATETABLE_AS_SELECT +ABSTRACT SYNTAX TREE: + (TOK_CREATETABLE (TOK_TABNAME tmp_src) TOK_LIKETABLE (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL value)) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL value)) cnt)) (TOK_GROUPBY (TOK_TABLE_OR_COL value)))) f1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL cnt)))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-9 depends on stages: Stage-2, Stage-0 + Stage-3 depends on stages: Stage-9 + Stage-0 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Select Operator + expressions: + expr: value + type: string + outputColumnNames: value + Group By Operator + aggregations: + expr: count(value) + bucketGroup: false + keys: + expr: value + type: string + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + Map-reduce partition columns: + expr: _col0 + type: string + tag: -1 + value expressions: + expr: _col1 + type: bigint + Reduce Operator Tree: + Group By Operator + aggregations: + expr: count(VALUE._col0) + bucketGroup: false + keys: + expr: KEY._col0 + type: string + mode: mergepartial + outputColumnNames: _col0, 
_col1 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col1 + type: bigint + sort order: + + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: bigint + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tmp_src + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-9 + Create Table Operator: + Create Table + columns: value string, cnt bigint + if not exists: false + input format: org.apache.hadoop.mapred.TextInputFormat + # buckets: -1 + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + name: tmp_src + isExternal: false + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-0 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tmp_src + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tmp_src + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: CREATE TABLE tmp_src AS SELECT * FROM (SELECT value, count(value) AS cnt FROM src GROUP BY value) f1 ORDER BY cnt +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: CREATE TABLE tmp_src AS SELECT * FROM (SELECT value, count(value) AS cnt FROM src GROUP BY value) f1 ORDER BY cnt +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@tmp_src +PREHOOK: query: SELECT * FROM tmp_src +PREHOOK: type: QUERY +PREHOOK: Input: default@tmp_src +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM tmp_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tmp_src +#### A masked pattern was here #### +val_490 1 +val_287 1 +val_286 1 +val_285 1 +val_284 1 +val_283 1 +val_114 1 +val_487 1 +val_485 1 +val_28 1 +val_484 1 +val_181 1 +val_275 1 +val_274 1 +val_183 1 +val_483 1 +val_27 1 +val_266 1 +val_482 1 +val_263 1 +val_262 1 +val_260 1 +val_481 1 +val_258 1 +val_257 1 +val_116 1 +val_479 1 +val_252 1 +val_249 1 +val_248 1 +val_247 1 +val_244 1 +val_92 1 +val_241 1 +val_477 1 +val_475 1 +val_472 1 +val_470 1 +val_235 1 +val_47 1 +val_186 1 +val_126 1 +val_228 1 +val_226 1 +val_131 1 +val_467 1 +val_222 1 +val_133 1 +val_82 1 +val_218 1 +val_80 1 +val_460 1 +val_214 1 +val_8 1 +val_78 1 +val_189 1 +val_457 1 +val_455 1 +val_136 1 +val_202 1 +val_201 1 +val_453 1 +val_20 1 +val_2 1 +val_19 1 
+val_452 1 +val_196 1 +val_449 1 +val_194 1 +val_190 1 +val_192 1 +val_448 1 +val_446 1 +val_444 1 +val_443 1 +val_44 1 +val_77 1 +val_143 1 +val_437 1 +val_436 1 +val_435 1 +val_432 1 +val_145 1 +val_150 1 +val_43 1 +val_10 1 +val_427 1 +val_74 1 +val_421 1 +val_9 1 +val_419 1 +val_418 1 +val_153 1 +val_105 1 +val_69 1 +val_411 1 +val_41 1 +val_155 1 +val_407 1 +val_156 1 +val_87 1 +val_157 1 +val_402 1 +val_158 1 +val_400 1 +val_4 1 +val_66 1 +val_65 1 +val_160 1 +val_64 1 +val_394 1 +val_393 1 +val_392 1 +val_389 1 +val_386 1 +val_162 1 +val_86 1 +val_379 1 +val_378 1 +val_377 1 +val_375 1 +val_374 1 +val_373 1 +val_57 1 +val_163 1 +val_368 1 +val_54 1 +val_366 1 +val_365 1 +val_364 1 +val_362 1 +val_360 1 +val_356 1 +val_53 1 +val_351 1 +val_166 1 +val_168 1 +val_345 1 +val_85 1 +val_11 1 +val_341 1 +val_34 1 +val_339 1 +val_338 1 +val_336 1 +val_335 1 +val_111 1 +val_332 1 +val_497 1 +val_33 1 +val_17 1 +val_496 1 +val_323 1 +val_495 1 +val_494 1 +val_170 1 +val_493 1 +val_177 1 +val_315 1 +val_178 1 +val_310 1 +val_96 1 +val_308 1 +val_491 1 +val_306 1 +val_305 1 +val_302 1 +val_30 1 +val_180 1 +val_296 1 +val_292 1 +val_291 1 +val_289 1 +val_98 2 +val_97 2 +val_95 2 +val_84 2 +val_83 2 +val_76 2 +val_72 2 +val_67 2 +val_58 2 +val_51 2 +val_492 2 +val_478 2 +val_463 2 +val_462 2 +val_459 2 +val_458 2 +val_439 2 +val_429 2 +val_424 2 +val_42 2 +val_414 2 +val_413 2 +val_404 2 +val_399 2 +val_397 2 +val_395 2 +val_382 2 +val_37 2 +val_367 2 +val_353 2 +val_344 2 +val_342 2 +val_333 2 +val_331 2 +val_325 2 +val_322 2 +val_321 2 +val_317 2 +val_309 2 +val_307 2 +val_288 2 +val_282 2 +val_281 2 +val_280 2 +val_278 2 +val_272 2 +val_265 2 +val_26 2 +val_256 2 +val_255 2 +val_242 2 +val_24 2 +val_239 2 +val_238 2 +val_237 2 +val_233 2 +val_229 2 +val_224 2 +val_223 2 +val_221 2 +val_219 2 +val_217 2 +val_216 2 +val_213 2 +val_209 2 +val_207 2 +val_205 2 +val_203 2 +val_200 2 +val_197 2 +val_195 2 +val_191 2 +val_18 2 +val_179 2 +val_176 2 +val_175 2 +val_174 2 +val_172 2 +val_165 2 +val_164 2 +val_152 2 +val_15 2 +val_149 2 +val_146 2 +val_137 2 +val_134 2 +val_129 2 +val_125 2 +val_120 2 +val_12 2 +val_118 2 +val_113 2 +val_104 2 +val_103 2 +val_100 2 +val_498 3 +val_369 3 +val_384 3 +val_396 3 +val_403 3 +val_409 3 +val_417 3 +val_5 3 +val_430 3 +val_70 3 +val_119 3 +val_0 3 +val_431 3 +val_438 3 +val_480 3 +val_193 3 +val_199 3 +val_208 3 +val_187 3 +val_273 3 +val_298 3 +val_454 3 +val_311 3 +val_316 3 +val_466 3 +val_90 3 +val_128 3 +val_318 3 +val_327 3 +val_167 3 +val_35 3 +val_468 4 +val_489 4 +val_406 4 +val_169 4 +val_138 4 +val_277 4 +val_469 5 +val_401 5 +val_230 5 +val_348 5 +PREHOOK: query: -- dyn partitions +CREATE TABLE tmp_src_part (c string) PARTITIONED BY (d int) +PREHOOK: type: CREATETABLE +POSTHOOK: query: -- dyn partitions +CREATE TABLE tmp_src_part (c string) PARTITIONED BY (d int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@tmp_src_part +PREHOOK: query: EXPLAIN INSERT INTO TABLE tmp_src_part PARTITION (d) SELECT * FROM tmp_src +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO TABLE tmp_src_part PARTITION (d) SELECT * FROM tmp_src +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME tmp_src))) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME tmp_src_part) (TOK_PARTSPEC (TOK_PARTVAL d)))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, 
Stage-4, Stage-7 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + tmp_src + TableScan + alias: tmp_src + Select Operator + expressions: + expr: value + type: string + expr: cnt + type: bigint + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tmp_src_part + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + d + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tmp_src_part + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-4 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tmp_src_part + + Stage: Stage-6 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tmp_src_part + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: INSERT INTO TABLE tmp_src_part PARTITION (d) SELECT * FROM tmp_src +PREHOOK: type: QUERY +PREHOOK: Input: default@tmp_src +PREHOOK: Output: default@tmp_src_part +POSTHOOK: query: INSERT INTO TABLE tmp_src_part PARTITION (d) SELECT * FROM tmp_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tmp_src +POSTHOOK: Output: default@tmp_src_part@d=1 +POSTHOOK: Output: default@tmp_src_part@d=2 +POSTHOOK: Output: default@tmp_src_part@d=3 +POSTHOOK: Output: default@tmp_src_part@d=4 +POSTHOOK: Output: default@tmp_src_part@d=5 +POSTHOOK: Lineage: tmp_src_part PARTITION(d=1).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=2).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=3).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=4).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=5).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM tmp_src_part +PREHOOK: type: QUERY +PREHOOK: Input: default@tmp_src_part +PREHOOK: Input: default@tmp_src_part@d=1 +PREHOOK: Input: default@tmp_src_part@d=2 +PREHOOK: Input: default@tmp_src_part@d=3 +PREHOOK: Input: 
default@tmp_src_part@d=4 +PREHOOK: Input: default@tmp_src_part@d=5 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM tmp_src_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tmp_src_part +POSTHOOK: Input: default@tmp_src_part@d=1 +POSTHOOK: Input: default@tmp_src_part@d=2 +POSTHOOK: Input: default@tmp_src_part@d=3 +POSTHOOK: Input: default@tmp_src_part@d=4 +POSTHOOK: Input: default@tmp_src_part@d=5 +#### A masked pattern was here #### +POSTHOOK: Lineage: tmp_src_part PARTITION(d=1).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=2).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=3).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=4).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=5).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +val_490 1 +val_287 1 +val_286 1 +val_285 1 +val_284 1 +val_283 1 +val_114 1 +val_487 1 +val_485 1 +val_28 1 +val_484 1 +val_181 1 +val_275 1 +val_274 1 +val_183 1 +val_483 1 +val_27 1 +val_266 1 +val_482 1 +val_263 1 +val_262 1 +val_260 1 +val_481 1 +val_258 1 +val_257 1 +val_116 1 +val_479 1 +val_252 1 +val_249 1 +val_248 1 +val_247 1 +val_244 1 +val_92 1 +val_241 1 +val_477 1 +val_475 1 +val_472 1 +val_470 1 +val_235 1 +val_47 1 +val_186 1 +val_126 1 +val_228 1 +val_226 1 +val_131 1 +val_467 1 +val_222 1 +val_133 1 +val_82 1 +val_218 1 +val_80 1 +val_460 1 +val_214 1 +val_8 1 +val_78 1 +val_189 1 +val_457 1 +val_455 1 +val_136 1 +val_202 1 +val_201 1 +val_453 1 +val_20 1 +val_2 1 +val_19 1 +val_452 1 +val_196 1 +val_449 1 +val_194 1 +val_190 1 +val_192 1 +val_448 1 +val_446 1 +val_444 1 +val_443 1 +val_44 1 +val_77 1 +val_143 1 +val_437 1 +val_436 1 +val_435 1 +val_432 1 +val_145 1 +val_150 1 +val_43 1 +val_10 1 +val_427 1 +val_74 1 +val_421 1 +val_9 1 +val_419 1 +val_418 1 +val_153 1 +val_105 1 +val_69 1 +val_411 1 +val_41 1 +val_155 1 +val_407 1 +val_156 1 +val_87 1 +val_157 1 +val_402 1 +val_158 1 +val_400 1 +val_4 1 +val_66 1 +val_65 1 +val_160 1 +val_64 1 +val_394 1 +val_393 1 +val_392 1 +val_389 1 +val_386 1 +val_162 1 +val_86 1 +val_379 1 +val_378 1 +val_377 1 +val_375 1 +val_374 1 +val_373 1 +val_57 1 +val_163 1 +val_368 1 +val_54 1 +val_366 1 +val_365 1 +val_364 1 +val_362 1 +val_360 1 +val_356 1 +val_53 1 +val_351 1 +val_166 1 +val_168 1 +val_345 1 +val_85 1 +val_11 1 +val_341 1 +val_34 1 +val_339 1 +val_338 1 +val_336 1 +val_335 1 +val_111 1 +val_332 1 +val_497 1 +val_33 1 +val_17 1 +val_496 1 +val_323 1 +val_495 1 +val_494 1 +val_170 1 +val_493 1 +val_177 1 +val_315 1 +val_178 1 +val_310 1 +val_96 1 +val_308 1 +val_491 1 +val_306 1 +val_305 1 +val_302 1 +val_30 1 +val_180 1 +val_296 1 +val_292 1 +val_291 1 +val_289 1 +val_98 2 +val_97 2 +val_95 2 +val_84 2 +val_83 2 +val_76 2 +val_72 2 +val_67 2 +val_58 2 +val_51 2 +val_492 2 +val_478 2 +val_463 2 +val_462 2 +val_459 2 +val_458 2 +val_439 2 +val_429 2 +val_424 2 +val_42 2 +val_414 2 +val_413 2 +val_404 2 +val_399 2 +val_397 2 +val_395 2 +val_382 2 +val_37 2 +val_367 2 +val_353 2 +val_344 2 +val_342 2 +val_333 2 +val_331 2 +val_325 2 +val_322 2 +val_321 2 +val_317 2 +val_309 2 +val_307 2 +val_288 2 +val_282 2 +val_281 2 +val_280 2 +val_278 2 +val_272 2 +val_265 2 +val_26 2 +val_256 2 +val_255 2 +val_242 2 +val_24 2 +val_239 2 +val_238 2 
+val_237 2 +val_233 2 +val_229 2 +val_224 2 +val_223 2 +val_221 2 +val_219 2 +val_217 2 +val_216 2 +val_213 2 +val_209 2 +val_207 2 +val_205 2 +val_203 2 +val_200 2 +val_197 2 +val_195 2 +val_191 2 +val_18 2 +val_179 2 +val_176 2 +val_175 2 +val_174 2 +val_172 2 +val_165 2 +val_164 2 +val_152 2 +val_15 2 +val_149 2 +val_146 2 +val_137 2 +val_134 2 +val_129 2 +val_125 2 +val_120 2 +val_12 2 +val_118 2 +val_113 2 +val_104 2 +val_103 2 +val_100 2 +val_498 3 +val_369 3 +val_384 3 +val_396 3 +val_403 3 +val_409 3 +val_417 3 +val_5 3 +val_430 3 +val_70 3 +val_119 3 +val_0 3 +val_431 3 +val_438 3 +val_480 3 +val_193 3 +val_199 3 +val_208 3 +val_187 3 +val_273 3 +val_298 3 +val_454 3 +val_311 3 +val_316 3 +val_466 3 +val_90 3 +val_128 3 +val_318 3 +val_327 3 +val_167 3 +val_35 3 +val_468 4 +val_489 4 +val_406 4 +val_169 4 +val_138 4 +val_277 4 +val_469 5 +val_401 5 +val_230 5 +val_348 5 +PREHOOK: query: -- multi insert +CREATE TABLE even (c int, d string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: -- multi insert +CREATE TABLE even (c int, d string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@even +POSTHOOK: Lineage: tmp_src_part PARTITION(d=1).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=2).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=3).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=4).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=5).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE odd (c int, d string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE odd (c int, d string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@odd +POSTHOOK: Lineage: tmp_src_part PARTITION(d=1).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=2).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=3).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=4).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=5).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: EXPLAIN +FROM src +INSERT INTO TABLE even SELECT key, value WHERE key % 2 = 0 +INSERT INTO TABLE odd SELECT key, value WHERE key % 2 = 1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +FROM src +INSERT INTO TABLE even SELECT key, value WHERE key % 2 = 0 +INSERT INTO TABLE odd SELECT key, value WHERE key % 2 = 1 +POSTHOOK: type: QUERY +POSTHOOK: Lineage: tmp_src_part PARTITION(d=1).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=2).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=3).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=4).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=5).c SIMPLE 
[(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME even))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_WHERE (= (% (TOK_TABLE_OR_COL key) 2) 0))) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME odd))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_WHERE (= (% (TOK_TABLE_OR_COL key) 2) 1)))) + +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-9 depends on stages: Stage-2 , consists of Stage-6, Stage-5, Stage-7 + Stage-6 + Stage-3 depends on stages: Stage-6, Stage-5, Stage-8, Stage-12, Stage-11, Stage-14 + Stage-0 depends on stages: Stage-3 + Stage-4 depends on stages: Stage-0 + Stage-1 depends on stages: Stage-3 + Stage-10 depends on stages: Stage-1 + Stage-5 + Stage-7 + Stage-8 depends on stages: Stage-7 + Stage-15 depends on stages: Stage-2 , consists of Stage-12, Stage-11, Stage-13 + Stage-12 + Stage-11 + Stage-13 + Stage-14 depends on stages: Stage-13 + +STAGE PLANS: + Stage: Stage-2 + Tez + Alias -> Map Operator Tree: + src + TableScan + alias: src + Filter Operator + predicate: + expr: ((key % 2) = 0) + type: boolean + Select Operator + expressions: + expr: UDFToInteger(key) + type: int + expr: value + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.even + Filter Operator + predicate: + expr: ((key % 2) = 1) + type: boolean + Select Operator + expressions: + expr: UDFToInteger(key) + type: int + expr: value + type: string + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + GlobalTableId: 2 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.odd + + Stage: Stage-9 + Conditional Operator + + Stage: Stage-6 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-3 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.even + + Stage: Stage-4 + Stats-Aggr Operator + + Stage: Stage-1 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.odd + + Stage: Stage-10 + Stats-Aggr Operator + + Stage: Stage-5 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.even + + Stage: Stage-7 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output 
Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.even + + Stage: Stage-8 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-15 + Conditional Operator + + Stage: Stage-12 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-11 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.odd + + Stage: Stage-13 + Tez + Alias -> Map Operator Tree: +#### A masked pattern was here #### + TableScan + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.odd + + Stage: Stage-14 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: FROM src +INSERT INTO TABLE even SELECT key, value WHERE key % 2 = 0 +INSERT INTO TABLE odd SELECT key, value WHERE key % 2 = 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@even +PREHOOK: Output: default@odd +POSTHOOK: query: FROM src +INSERT INTO TABLE even SELECT key, value WHERE key % 2 = 0 +INSERT INTO TABLE odd SELECT key, value WHERE key % 2 = 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@even +POSTHOOK: Output: default@odd +POSTHOOK: Lineage: even.c EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: even.d SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: odd.c EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: odd.d SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=1).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=2).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=3).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=4).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=5).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM even +PREHOOK: type: QUERY +PREHOOK: Input: default@even +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM even +POSTHOOK: type: QUERY +POSTHOOK: Input: default@even +#### A masked pattern was here #### +POSTHOOK: Lineage: even.c EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: even.d SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: odd.c EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: 
Lineage: odd.d SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=1).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=2).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=3).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=4).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=5).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +238 val_238 +86 val_86 +278 val_278 +98 val_98 +484 val_484 +150 val_150 +224 val_224 +66 val_66 +128 val_128 +146 val_146 +406 val_406 +374 val_374 +152 val_152 +82 val_82 +166 val_166 +430 val_430 +252 val_252 +292 val_292 +338 val_338 +446 val_446 +394 val_394 +482 val_482 +174 val_174 +494 val_494 +466 val_466 +208 val_208 +174 val_174 +396 val_396 +162 val_162 +266 val_266 +342 val_342 +0 val_0 +128 val_128 +316 val_316 +302 val_302 +438 val_438 +170 val_170 +20 val_20 +378 val_378 +92 val_92 +72 val_72 +4 val_4 +280 val_280 +208 val_208 +356 val_356 +382 val_382 +498 val_498 +386 val_386 +192 val_192 +286 val_286 +176 val_176 +54 val_54 +138 val_138 +216 val_216 +430 val_430 +278 val_278 +176 val_176 +318 val_318 +332 val_332 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +260 val_260 +404 val_404 +384 val_384 +272 val_272 +138 val_138 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +172 val_172 +42 val_42 +158 val_158 +496 val_496 +0 val_0 +322 val_322 +468 val_468 +454 val_454 +100 val_100 +298 val_298 +418 val_418 +96 val_96 +26 val_26 +230 val_230 +120 val_120 +404 val_404 +436 val_436 +156 val_156 +468 val_468 +308 val_308 +196 val_196 +288 val_288 +98 val_98 +282 val_282 +318 val_318 +318 val_318 +470 val_470 +316 val_316 +0 val_0 +490 val_490 +364 val_364 +118 val_118 +134 val_134 +282 val_282 +138 val_138 +238 val_238 +118 val_118 +72 val_72 +90 val_90 +10 val_10 +306 val_306 +224 val_224 +242 val_242 +392 val_392 +272 val_272 +242 val_242 +452 val_452 +226 val_226 +402 val_402 +396 val_396 +58 val_58 +336 val_336 +168 val_168 +34 val_34 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +42 val_42 +430 val_430 +458 val_458 +78 val_78 +76 val_76 +492 val_492 +218 val_218 +228 val_228 +138 val_138 +30 val_30 +64 val_64 +468 val_468 +76 val_76 +74 val_74 +342 val_342 +230 val_230 +368 val_368 +296 val_296 +216 val_216 +344 val_344 +274 val_274 +116 val_116 +256 val_256 +70 val_70 +480 val_480 +288 val_288 +244 val_244 +438 val_438 +128 val_128 +432 val_432 +202 val_202 +316 val_316 +280 val_280 +2 val_2 +80 val_80 +44 val_44 +104 val_104 +466 val_466 +366 val_366 +406 val_406 +190 val_190 +406 val_406 +114 val_114 +258 val_258 +90 val_90 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +164 val_164 +454 val_454 +478 val_478 +298 val_298 +164 val_164 +424 val_424 +382 val_382 +70 val_70 +480 val_480 +24 val_24 +104 val_104 +70 val_70 +438 val_438 +414 val_414 +200 val_200 +360 val_360 +248 val_248 +444 val_444 +120 val_120 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +460 val_460 +480 val_480 +136 val_136 +172 val_172 +214 val_214 +462 val_462 +406 val_406 +454 val_454 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +384 val_384 +18 val_18 +462 val_462 +492 
val_492 +100 val_100 +298 val_298 +498 val_498 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +348 val_348 +18 val_18 +344 val_344 +84 val_84 +28 val_28 +448 val_448 +152 val_152 +348 val_348 +194 val_194 +414 val_414 +222 val_222 +126 val_126 +90 val_90 +400 val_400 +200 val_200 +PREHOOK: query: SELECT * FROM odd +PREHOOK: type: QUERY +PREHOOK: Input: default@odd +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM odd +POSTHOOK: type: QUERY +POSTHOOK: Input: default@odd +#### A masked pattern was here #### +POSTHOOK: Lineage: even.c EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: even.d SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: odd.c EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: odd.d SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=1).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=2).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=3).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=4).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=5).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +265 val_265 +193 val_193 +401 val_401 +273 val_273 +369 val_369 +213 val_213 +429 val_429 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +403 val_403 +417 val_417 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +459 val_459 +237 val_237 +413 val_413 +207 val_207 +199 val_199 +399 val_399 +247 val_247 +417 val_417 +489 val_489 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +439 val_439 +367 val_367 +325 val_325 +167 val_167 +195 val_195 +475 val_475 +17 val_17 +113 val_113 +155 val_155 +203 val_203 +339 val_339 +455 val_455 +311 val_311 +57 val_57 +205 val_205 +149 val_149 +345 val_345 +129 val_129 +489 val_489 +157 val_157 +221 val_221 +111 val_111 +47 val_47 +35 val_35 +427 val_427 +277 val_277 +399 val_399 +169 val_169 +125 val_125 +437 val_437 +469 val_469 +187 val_187 +459 val_459 +51 val_51 +103 val_103 +239 val_239 +213 val_213 +289 val_289 +221 val_221 +65 val_65 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +181 val_181 +67 val_67 +489 val_489 +353 val_353 +373 val_373 +217 val_217 +411 val_411 +463 val_463 +431 val_431 +179 val_179 +129 val_129 +119 val_119 +197 val_197 +393 val_393 +199 val_199 +191 val_191 +165 val_165 +327 val_327 +205 val_205 +131 val_131 +51 val_51 +43 val_43 +469 val_469 +95 val_95 +481 val_481 +457 val_457 +197 val_197 +187 val_187 +409 val_409 +137 val_137 +369 val_369 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +87 val_87 +179 val_179 +395 val_395 +419 val_419 +15 val_15 +307 val_307 +19 val_19 +435 val_435 +277 val_277 +273 val_273 +309 val_309 +389 val_389 +327 val_327 +369 val_369 +331 val_331 +401 val_401 +177 val_177 +5 val_5 +497 val_497 +317 val_317 +395 val_395 +35 val_35 +95 val_95 +11 val_11 +229 val_229 +233 val_233 +143 val_143 +195 val_195 +321 val_321 +119 val_119 +489 val_489 +41 val_41 +223 val_223 +149 val_149 
+449 val_449 +453 val_453 +209 val_209 +69 val_69 +33 val_33 +103 val_103 +113 val_113 +367 val_367 +167 val_167 +219 val_219 +239 val_239 +485 val_485 +223 val_223 +263 val_263 +487 val_487 +401 val_401 +191 val_191 +5 val_5 +467 val_467 +229 val_229 +469 val_469 +463 val_463 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +193 val_193 +321 val_321 +335 val_335 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 val_105 +257 val_257 +409 val_409 +401 val_401 +203 val_203 +201 val_201 +217 val_217 +431 val_431 +125 val_125 +431 val_431 +187 val_187 +5 val_5 +397 val_397 +291 val_291 +351 val_351 +255 val_255 +163 val_163 +119 val_119 +491 val_491 +237 val_237 +439 val_439 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +317 val_317 +333 val_333 +493 val_493 +207 val_207 +249 val_249 +265 val_265 +83 val_83 +353 val_353 +233 val_233 +133 val_133 +175 val_175 +189 val_189 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +67 val_67 +379 val_379 +9 val_9 +341 val_341 +285 val_285 +167 val_167 +273 val_273 +183 val_183 +281 val_281 +97 val_97 +469 val_469 +315 val_315 +37 val_37 +307 val_307 +477 val_477 +169 val_169 +403 val_403 +97 val_97 +PREHOOK: query: -- drop the tables +DROP TABLE even +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@even +PREHOOK: Output: default@even +POSTHOOK: query: -- drop the tables +DROP TABLE even +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@even +POSTHOOK: Output: default@even +POSTHOOK: Lineage: even.c EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: even.d SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: odd.c EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: odd.d SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=1).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=2).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=3).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=4).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=5).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: DROP TABLE odd +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@odd +PREHOOK: Output: default@odd +POSTHOOK: query: DROP TABLE odd +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@odd +POSTHOOK: Output: default@odd +POSTHOOK: Lineage: even.c EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: even.d SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: odd.c EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: odd.d SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=1).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=2).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=3).c SIMPLE 
[(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=4).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=5).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: DROP TABLE tmp_src +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@tmp_src +PREHOOK: Output: default@tmp_src +POSTHOOK: query: DROP TABLE tmp_src +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@tmp_src +POSTHOOK: Output: default@tmp_src +POSTHOOK: Lineage: even.c EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: even.d SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: odd.c EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: odd.d SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=1).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=2).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=3).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=4).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=5).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: DROP TABLE tmp_src_part +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@tmp_src_part +PREHOOK: Output: default@tmp_src_part +POSTHOOK: query: DROP TABLE tmp_src_part +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@tmp_src_part +POSTHOOK: Output: default@tmp_src_part +POSTHOOK: Lineage: even.c EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: even.d SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: odd.c EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: odd.d SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=1).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=2).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=3).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=4).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: tmp_src_part PARTITION(d=5).c SIMPLE [(tmp_src)tmp_src.FieldSchema(name:value, type:string, comment:null), ] Index: ql/src/test/results/clientpositive/tez/tez_insert_overwrite_local_directory_1.q.out =================================================================== Cannot display: file marked as a binary type. 
svn:mime-type = application/octet-stream Index: ql/src/test/results/clientpositive/tez/tez_insert_overwrite_local_directory_1.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/tez_insert_overwrite_local_directory_1.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/tez_insert_overwrite_local_directory_1.q.out (working copy) Property changes on: ql/src/test/results/clientpositive/tez/tez_insert_overwrite_local_directory_1.q.out ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Index: ql/src/test/results/clientpositive/tez/tez_join_tests.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/tez_join_tests.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/tez_join_tests.q.out (working copy) @@ -0,0 +1,2247 @@ +PREHOOK: query: explain +select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_RIGHTOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_LEFTOUTERJOIN (TOK_TABREF (TOK_TABNAME src1) a) (TOK_TABREF (TOK_TABNAME src) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL b) key))))) x) (TOK_TABREF (TOK_TABNAME src) c) (= (. (TOK_TABLE_OR_COL x) value) (. (TOK_TABLE_OR_COL c) value)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (. 
(TOK_TABLE_OR_COL x) key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + b + TableScan + alias: b + Reduce Output Operator + key expressions: + expr: key + type: string + sort order: + + Map-reduce partition columns: + expr: key + type: string + tag: 1 + value expressions: + expr: key + type: string + expr: value + type: string + Alias -> Map Operator Tree: + a + TableScan + alias: a + Reduce Output Operator + key expressions: + expr: key + type: string + sort order: + + Map-reduce partition columns: + expr: key + type: string + tag: 0 + Reduce Operator Tree: + Join Operator + condition map: + Left Outer Join0 to 1 + condition expressions: + 0 + 1 {VALUE._col0} {VALUE._col1} + handleSkewJoin: false + outputColumnNames: _col4, _col5 + Select Operator + expressions: + expr: _col4 + type: string + expr: _col5 + type: string + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Reduce Operator Tree: + Extract + Reduce Output Operator + key expressions: + expr: _col1 + type: string + sort order: + + Map-reduce partition columns: + expr: _col1 + type: string + tag: 0 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Alias -> Map Operator Tree: + c + TableScan + alias: c + Reduce Output Operator + key expressions: + expr: value + type: string + sort order: + + Map-reduce partition columns: + expr: value + type: string + tag: 1 + value expressions: + expr: key + type: string + expr: value + type: string + Reduce Operator Tree: + Join Operator + condition map: + Right Outer Join0 to 1 + condition expressions: + 0 {VALUE._col0} {VALUE._col1} + 1 {VALUE._col0} {VALUE._col1} + handleSkewJoin: false + outputColumnNames: _col0, _col1, _col2, _col3 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: string + expr: _col2 + type: string + expr: _col3 + type: string + outputColumnNames: _col0, _col1, _col2, _col3 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + expr: _col2 + type: string + expr: _col3 + type: string + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@src1 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@src1 +#### A masked pattern was here #### +NULL NULL 0 val_0 +NULL NULL 97 val_97 +NULL NULL 97 val_97 +NULL NULL 96 val_96 +NULL NULL 95 val_95 +NULL NULL 95 val_95 +NULL NULL 92 val_92 +NULL NULL 90 val_90 +NULL NULL 90 val_90 +NULL NULL 90 
val_90 +NULL NULL 9 val_9 +NULL NULL 87 val_87 +NULL NULL 86 val_86 +NULL NULL 85 val_85 +NULL NULL 84 val_84 +NULL NULL 84 val_84 +NULL NULL 83 val_83 +NULL NULL 83 val_83 +NULL NULL 82 val_82 +NULL NULL 80 val_80 +NULL NULL 8 val_8 +NULL NULL 78 val_78 +NULL NULL 77 val_77 +NULL NULL 76 val_76 +NULL NULL 76 val_76 +NULL NULL 74 val_74 +NULL NULL 72 val_72 +NULL NULL 72 val_72 +NULL NULL 70 val_70 +NULL NULL 70 val_70 +NULL NULL 70 val_70 +NULL NULL 69 val_69 +NULL NULL 67 val_67 +NULL NULL 67 val_67 +NULL NULL 65 val_65 +NULL NULL 64 val_64 +NULL NULL 58 val_58 +NULL NULL 58 val_58 +NULL NULL 57 val_57 +NULL NULL 54 val_54 +NULL NULL 53 val_53 +NULL NULL 51 val_51 +NULL NULL 51 val_51 +NULL NULL 5 val_5 +NULL NULL 5 val_5 +NULL NULL 5 val_5 +NULL NULL 498 val_498 +NULL NULL 498 val_498 +NULL NULL 498 val_498 +NULL NULL 497 val_497 +NULL NULL 496 val_496 +NULL NULL 495 val_495 +NULL NULL 494 val_494 +NULL NULL 493 val_493 +NULL NULL 492 val_492 +NULL NULL 492 val_492 +NULL NULL 491 val_491 +NULL NULL 490 val_490 +NULL NULL 489 val_489 +NULL NULL 489 val_489 +NULL NULL 489 val_489 +NULL NULL 489 val_489 +NULL NULL 487 val_487 +NULL NULL 485 val_485 +NULL NULL 484 val_484 +NULL NULL 483 val_483 +NULL NULL 482 val_482 +NULL NULL 481 val_481 +NULL NULL 480 val_480 +NULL NULL 480 val_480 +NULL NULL 480 val_480 +NULL NULL 479 val_479 +NULL NULL 478 val_478 +NULL NULL 478 val_478 +NULL NULL 477 val_477 +NULL NULL 475 val_475 +NULL NULL 472 val_472 +NULL NULL 470 val_470 +NULL NULL 47 val_47 +NULL NULL 469 val_469 +NULL NULL 469 val_469 +NULL NULL 469 val_469 +NULL NULL 469 val_469 +NULL NULL 469 val_469 +NULL NULL 468 val_468 +NULL NULL 468 val_468 +NULL NULL 468 val_468 +NULL NULL 468 val_468 +NULL NULL 467 val_467 +NULL NULL 466 val_466 +NULL NULL 466 val_466 +NULL NULL 466 val_466 +NULL NULL 463 val_463 +NULL NULL 463 val_463 +NULL NULL 462 val_462 +NULL NULL 462 val_462 +NULL NULL 460 val_460 +NULL NULL 459 val_459 +NULL NULL 459 val_459 +NULL NULL 458 val_458 +NULL NULL 458 val_458 +NULL NULL 457 val_457 +NULL NULL 455 val_455 +NULL NULL 454 val_454 +NULL NULL 454 val_454 +NULL NULL 454 val_454 +NULL NULL 453 val_453 +NULL NULL 452 val_452 +NULL NULL 449 val_449 +NULL NULL 448 val_448 +NULL NULL 446 val_446 +NULL NULL 444 val_444 +NULL NULL 443 val_443 +NULL NULL 44 val_44 +NULL NULL 439 val_439 +NULL NULL 439 val_439 +NULL NULL 438 val_438 +NULL NULL 438 val_438 +NULL NULL 438 val_438 +NULL NULL 437 val_437 +NULL NULL 436 val_436 +NULL NULL 435 val_435 +NULL NULL 432 val_432 +NULL NULL 431 val_431 +NULL NULL 431 val_431 +NULL NULL 431 val_431 +NULL NULL 430 val_430 +NULL NULL 430 val_430 +NULL NULL 430 val_430 +NULL NULL 43 val_43 +NULL NULL 429 val_429 +NULL NULL 429 val_429 +NULL NULL 427 val_427 +NULL NULL 424 val_424 +NULL NULL 424 val_424 +NULL NULL 421 val_421 +NULL NULL 42 val_42 +NULL NULL 42 val_42 +NULL NULL 419 val_419 +NULL NULL 418 val_418 +NULL NULL 417 val_417 +NULL NULL 417 val_417 +NULL NULL 417 val_417 +NULL NULL 414 val_414 +NULL NULL 414 val_414 +NULL NULL 413 val_413 +NULL NULL 413 val_413 +NULL NULL 411 val_411 +NULL NULL 41 val_41 +NULL NULL 409 val_409 +NULL NULL 409 val_409 +NULL NULL 409 val_409 +NULL NULL 407 val_407 +NULL NULL 404 val_404 +NULL NULL 404 val_404 +NULL NULL 403 val_403 +NULL NULL 403 val_403 +NULL NULL 403 val_403 +NULL NULL 402 val_402 +NULL NULL 400 val_400 +NULL NULL 4 val_4 +NULL NULL 399 val_399 +NULL NULL 399 val_399 +NULL NULL 397 val_397 +NULL NULL 397 val_397 +NULL NULL 396 val_396 +NULL NULL 396 val_396 +NULL NULL 396 val_396 +NULL NULL 
395 val_395 +NULL NULL 395 val_395 +NULL NULL 394 val_394 +NULL NULL 393 val_393 +NULL NULL 392 val_392 +NULL NULL 389 val_389 +NULL NULL 386 val_386 +NULL NULL 384 val_384 +NULL NULL 384 val_384 +NULL NULL 384 val_384 +NULL NULL 382 val_382 +NULL NULL 382 val_382 +NULL NULL 379 val_379 +NULL NULL 378 val_378 +NULL NULL 377 val_377 +NULL NULL 375 val_375 +NULL NULL 374 val_374 +NULL NULL 373 val_373 +NULL NULL 37 val_37 +NULL NULL 37 val_37 +NULL NULL 368 val_368 +NULL NULL 367 val_367 +NULL NULL 367 val_367 +NULL NULL 366 val_366 +NULL NULL 365 val_365 +NULL NULL 364 val_364 +NULL NULL 362 val_362 +NULL NULL 360 val_360 +NULL NULL 356 val_356 +NULL NULL 353 val_353 +NULL NULL 353 val_353 +NULL NULL 351 val_351 +NULL NULL 35 val_35 +NULL NULL 35 val_35 +NULL NULL 35 val_35 +NULL NULL 348 val_348 +NULL NULL 348 val_348 +NULL NULL 348 val_348 +NULL NULL 348 val_348 +NULL NULL 348 val_348 +NULL NULL 345 val_345 +NULL NULL 344 val_344 +NULL NULL 344 val_344 +NULL NULL 342 val_342 +NULL NULL 342 val_342 +NULL NULL 341 val_341 +NULL NULL 34 val_34 +NULL NULL 339 val_339 +NULL NULL 338 val_338 +NULL NULL 336 val_336 +NULL NULL 335 val_335 +NULL NULL 333 val_333 +NULL NULL 333 val_333 +NULL NULL 332 val_332 +NULL NULL 331 val_331 +NULL NULL 331 val_331 +NULL NULL 33 val_33 +NULL NULL 327 val_327 +NULL NULL 327 val_327 +NULL NULL 327 val_327 +NULL NULL 325 val_325 +NULL NULL 325 val_325 +NULL NULL 323 val_323 +NULL NULL 322 val_322 +NULL NULL 322 val_322 +NULL NULL 321 val_321 +NULL NULL 321 val_321 +NULL NULL 318 val_318 +NULL NULL 318 val_318 +NULL NULL 318 val_318 +NULL NULL 317 val_317 +NULL NULL 317 val_317 +NULL NULL 316 val_316 +NULL NULL 316 val_316 +NULL NULL 316 val_316 +NULL NULL 315 val_315 +NULL NULL 310 val_310 +NULL NULL 309 val_309 +NULL NULL 309 val_309 +NULL NULL 308 val_308 +NULL NULL 307 val_307 +NULL NULL 307 val_307 +NULL NULL 306 val_306 +NULL NULL 305 val_305 +NULL NULL 302 val_302 +NULL NULL 30 val_30 +NULL NULL 298 val_298 +NULL NULL 298 val_298 +NULL NULL 298 val_298 +NULL NULL 296 val_296 +NULL NULL 292 val_292 +NULL NULL 291 val_291 +NULL NULL 289 val_289 +NULL NULL 288 val_288 +NULL NULL 288 val_288 +NULL NULL 287 val_287 +NULL NULL 286 val_286 +NULL NULL 285 val_285 +NULL NULL 284 val_284 +NULL NULL 283 val_283 +NULL NULL 282 val_282 +NULL NULL 282 val_282 +NULL NULL 281 val_281 +NULL NULL 281 val_281 +NULL NULL 280 val_280 +NULL NULL 280 val_280 +NULL NULL 28 val_28 +NULL NULL 277 val_277 +NULL NULL 277 val_277 +NULL NULL 277 val_277 +NULL NULL 277 val_277 +NULL NULL 275 val_275 +NULL NULL 274 val_274 +NULL NULL 272 val_272 +NULL NULL 272 val_272 +NULL NULL 27 val_27 +NULL NULL 266 val_266 +NULL NULL 265 val_265 +NULL NULL 265 val_265 +NULL NULL 263 val_263 +NULL NULL 262 val_262 +NULL NULL 260 val_260 +NULL NULL 26 val_26 +NULL NULL 26 val_26 +NULL NULL 258 val_258 +NULL NULL 257 val_257 +NULL NULL 256 val_256 +NULL NULL 256 val_256 +NULL NULL 252 val_252 +NULL NULL 249 val_249 +NULL NULL 248 val_248 +NULL NULL 247 val_247 +NULL NULL 244 val_244 +NULL NULL 242 val_242 +NULL NULL 242 val_242 +NULL NULL 241 val_241 +NULL NULL 24 val_24 +NULL NULL 24 val_24 +NULL NULL 239 val_239 +NULL NULL 239 val_239 +NULL NULL 237 val_237 +NULL NULL 237 val_237 +NULL NULL 235 val_235 +NULL NULL 233 val_233 +NULL NULL 233 val_233 +NULL NULL 230 val_230 +NULL NULL 230 val_230 +NULL NULL 230 val_230 +NULL NULL 230 val_230 +NULL NULL 230 val_230 +NULL NULL 229 val_229 +NULL NULL 229 val_229 +NULL NULL 228 val_228 +NULL NULL 226 val_226 +NULL NULL 223 val_223 +NULL NULL 223 val_223 +NULL 
NULL 222 val_222 +NULL NULL 221 val_221 +NULL NULL 221 val_221 +NULL NULL 219 val_219 +NULL NULL 219 val_219 +NULL NULL 218 val_218 +NULL NULL 217 val_217 +NULL NULL 217 val_217 +NULL NULL 216 val_216 +NULL NULL 216 val_216 +NULL NULL 214 val_214 +NULL NULL 209 val_209 +NULL NULL 209 val_209 +NULL NULL 208 val_208 +NULL NULL 208 val_208 +NULL NULL 208 val_208 +NULL NULL 207 val_207 +NULL NULL 207 val_207 +NULL NULL 205 val_205 +NULL NULL 205 val_205 +NULL NULL 203 val_203 +NULL NULL 203 val_203 +NULL NULL 202 val_202 +NULL NULL 201 val_201 +NULL NULL 200 val_200 +NULL NULL 200 val_200 +NULL NULL 20 val_20 +NULL NULL 2 val_2 +NULL NULL 199 val_199 +NULL NULL 199 val_199 +NULL NULL 199 val_199 +NULL NULL 197 val_197 +NULL NULL 197 val_197 +NULL NULL 196 val_196 +NULL NULL 195 val_195 +NULL NULL 195 val_195 +NULL NULL 194 val_194 +NULL NULL 193 val_193 +NULL NULL 193 val_193 +NULL NULL 193 val_193 +NULL NULL 192 val_192 +NULL NULL 191 val_191 +NULL NULL 191 val_191 +NULL NULL 190 val_190 +NULL NULL 19 val_19 +NULL NULL 189 val_189 +NULL NULL 187 val_187 +NULL NULL 187 val_187 +NULL NULL 187 val_187 +NULL NULL 186 val_186 +NULL NULL 183 val_183 +NULL NULL 181 val_181 +NULL NULL 180 val_180 +NULL NULL 18 val_18 +NULL NULL 18 val_18 +NULL NULL 179 val_179 +NULL NULL 179 val_179 +NULL NULL 178 val_178 +NULL NULL 177 val_177 +NULL NULL 176 val_176 +NULL NULL 176 val_176 +NULL NULL 175 val_175 +NULL NULL 175 val_175 +NULL NULL 174 val_174 +NULL NULL 174 val_174 +NULL NULL 172 val_172 +NULL NULL 172 val_172 +NULL NULL 170 val_170 +NULL NULL 17 val_17 +NULL NULL 169 val_169 +NULL NULL 169 val_169 +NULL NULL 169 val_169 +NULL NULL 169 val_169 +NULL NULL 168 val_168 +NULL NULL 167 val_167 +NULL NULL 167 val_167 +NULL NULL 167 val_167 +NULL NULL 166 val_166 +NULL NULL 165 val_165 +NULL NULL 165 val_165 +NULL NULL 164 val_164 +NULL NULL 164 val_164 +NULL NULL 163 val_163 +NULL NULL 162 val_162 +NULL NULL 160 val_160 +NULL NULL 158 val_158 +NULL NULL 157 val_157 +NULL NULL 156 val_156 +NULL NULL 155 val_155 +NULL NULL 153 val_153 +NULL NULL 152 val_152 +NULL NULL 152 val_152 +NULL NULL 15 val_15 +NULL NULL 15 val_15 +NULL NULL 149 val_149 +NULL NULL 149 val_149 +NULL NULL 145 val_145 +NULL NULL 143 val_143 +NULL NULL 138 val_138 +NULL NULL 138 val_138 +NULL NULL 138 val_138 +NULL NULL 138 val_138 +NULL NULL 137 val_137 +NULL NULL 137 val_137 +NULL NULL 136 val_136 +NULL NULL 134 val_134 +NULL NULL 134 val_134 +NULL NULL 133 val_133 +NULL NULL 131 val_131 +NULL NULL 129 val_129 +NULL NULL 129 val_129 +NULL NULL 126 val_126 +NULL NULL 125 val_125 +NULL NULL 125 val_125 +NULL NULL 120 val_120 +NULL NULL 120 val_120 +NULL NULL 12 val_12 +NULL NULL 12 val_12 +NULL NULL 119 val_119 +NULL NULL 119 val_119 +NULL NULL 119 val_119 +NULL NULL 118 val_118 +NULL NULL 118 val_118 +NULL NULL 116 val_116 +NULL NULL 114 val_114 +NULL NULL 113 val_113 +NULL NULL 113 val_113 +NULL NULL 111 val_111 +NULL NULL 11 val_11 +NULL NULL 105 val_105 +NULL NULL 104 val_104 +NULL NULL 104 val_104 +NULL NULL 103 val_103 +NULL NULL 103 val_103 +NULL NULL 100 val_100 +NULL NULL 100 val_100 +NULL NULL 10 val_10 +NULL NULL 0 val_0 +NULL NULL 0 val_0 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +146 val_146 146 val_146 +146 val_146 146 val_146 +146 val_146 146 val_146 +146 val_146 146 val_146 +150 val_150 150 val_150 +213 val_213 213 val_213 +213 val_213 213 
val_213 +213 val_213 213 val_213 +213 val_213 213 val_213 +224 val_224 224 val_224 +224 val_224 224 val_224 +224 val_224 224 val_224 +224 val_224 224 val_224 +238 val_238 238 val_238 +238 val_238 238 val_238 +238 val_238 238 val_238 +238 val_238 238 val_238 +255 val_255 255 val_255 +255 val_255 255 val_255 +255 val_255 255 val_255 +255 val_255 255 val_255 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +278 val_278 278 val_278 +278 val_278 278 val_278 +278 val_278 278 val_278 +278 val_278 278 val_278 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +66 val_66 66 val_66 +98 val_98 98 val_98 +98 val_98 98 val_98 +98 val_98 98 val_98 +98 val_98 98 val_98 +PREHOOK: query: select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key)) x right outer join src c on (x.value = c.value) order by x.key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@src1 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key)) x right outer join src c on (x.value = c.value) order by x.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@src1 +#### A masked pattern was here #### +NULL NULL 0 val_0 +NULL NULL 97 val_97 +NULL NULL 97 val_97 +NULL NULL 96 val_96 +NULL NULL 95 val_95 +NULL NULL 95 val_95 +NULL NULL 92 val_92 +NULL NULL 90 val_90 +NULL NULL 90 val_90 +NULL NULL 90 val_90 +NULL NULL 9 val_9 +NULL NULL 87 val_87 +NULL NULL 86 val_86 +NULL NULL 85 val_85 +NULL NULL 84 val_84 +NULL NULL 84 val_84 +NULL NULL 83 val_83 +NULL NULL 83 val_83 +NULL NULL 82 val_82 +NULL NULL 80 val_80 +NULL NULL 8 val_8 +NULL NULL 78 val_78 +NULL NULL 77 val_77 +NULL NULL 76 val_76 +NULL NULL 76 val_76 +NULL NULL 74 val_74 +NULL NULL 72 val_72 +NULL NULL 72 val_72 +NULL NULL 70 val_70 +NULL NULL 70 val_70 +NULL NULL 70 val_70 +NULL NULL 69 val_69 +NULL NULL 67 val_67 +NULL NULL 67 val_67 
+NULL NULL 65 val_65 +NULL NULL 64 val_64 +NULL NULL 58 val_58 +NULL NULL 58 val_58 +NULL NULL 57 val_57 +NULL NULL 54 val_54 +NULL NULL 53 val_53 +NULL NULL 51 val_51 +NULL NULL 51 val_51 +NULL NULL 5 val_5 +NULL NULL 5 val_5 +NULL NULL 5 val_5 +NULL NULL 498 val_498 +NULL NULL 498 val_498 +NULL NULL 498 val_498 +NULL NULL 497 val_497 +NULL NULL 496 val_496 +NULL NULL 495 val_495 +NULL NULL 494 val_494 +NULL NULL 493 val_493 +NULL NULL 492 val_492 +NULL NULL 492 val_492 +NULL NULL 491 val_491 +NULL NULL 490 val_490 +NULL NULL 489 val_489 +NULL NULL 489 val_489 +NULL NULL 489 val_489 +NULL NULL 489 val_489 +NULL NULL 487 val_487 +NULL NULL 485 val_485 +NULL NULL 484 val_484 +NULL NULL 483 val_483 +NULL NULL 482 val_482 +NULL NULL 481 val_481 +NULL NULL 480 val_480 +NULL NULL 480 val_480 +NULL NULL 480 val_480 +NULL NULL 479 val_479 +NULL NULL 478 val_478 +NULL NULL 478 val_478 +NULL NULL 477 val_477 +NULL NULL 475 val_475 +NULL NULL 472 val_472 +NULL NULL 470 val_470 +NULL NULL 47 val_47 +NULL NULL 469 val_469 +NULL NULL 469 val_469 +NULL NULL 469 val_469 +NULL NULL 469 val_469 +NULL NULL 469 val_469 +NULL NULL 468 val_468 +NULL NULL 468 val_468 +NULL NULL 468 val_468 +NULL NULL 468 val_468 +NULL NULL 467 val_467 +NULL NULL 466 val_466 +NULL NULL 466 val_466 +NULL NULL 466 val_466 +NULL NULL 463 val_463 +NULL NULL 463 val_463 +NULL NULL 462 val_462 +NULL NULL 462 val_462 +NULL NULL 460 val_460 +NULL NULL 459 val_459 +NULL NULL 459 val_459 +NULL NULL 458 val_458 +NULL NULL 458 val_458 +NULL NULL 457 val_457 +NULL NULL 455 val_455 +NULL NULL 454 val_454 +NULL NULL 454 val_454 +NULL NULL 454 val_454 +NULL NULL 453 val_453 +NULL NULL 452 val_452 +NULL NULL 449 val_449 +NULL NULL 448 val_448 +NULL NULL 446 val_446 +NULL NULL 444 val_444 +NULL NULL 443 val_443 +NULL NULL 44 val_44 +NULL NULL 439 val_439 +NULL NULL 439 val_439 +NULL NULL 438 val_438 +NULL NULL 438 val_438 +NULL NULL 438 val_438 +NULL NULL 437 val_437 +NULL NULL 436 val_436 +NULL NULL 435 val_435 +NULL NULL 432 val_432 +NULL NULL 431 val_431 +NULL NULL 431 val_431 +NULL NULL 431 val_431 +NULL NULL 430 val_430 +NULL NULL 430 val_430 +NULL NULL 430 val_430 +NULL NULL 43 val_43 +NULL NULL 429 val_429 +NULL NULL 429 val_429 +NULL NULL 427 val_427 +NULL NULL 424 val_424 +NULL NULL 424 val_424 +NULL NULL 421 val_421 +NULL NULL 42 val_42 +NULL NULL 42 val_42 +NULL NULL 419 val_419 +NULL NULL 418 val_418 +NULL NULL 417 val_417 +NULL NULL 417 val_417 +NULL NULL 417 val_417 +NULL NULL 414 val_414 +NULL NULL 414 val_414 +NULL NULL 413 val_413 +NULL NULL 413 val_413 +NULL NULL 411 val_411 +NULL NULL 41 val_41 +NULL NULL 409 val_409 +NULL NULL 409 val_409 +NULL NULL 409 val_409 +NULL NULL 407 val_407 +NULL NULL 404 val_404 +NULL NULL 404 val_404 +NULL NULL 403 val_403 +NULL NULL 403 val_403 +NULL NULL 403 val_403 +NULL NULL 402 val_402 +NULL NULL 400 val_400 +NULL NULL 4 val_4 +NULL NULL 399 val_399 +NULL NULL 399 val_399 +NULL NULL 397 val_397 +NULL NULL 397 val_397 +NULL NULL 396 val_396 +NULL NULL 396 val_396 +NULL NULL 396 val_396 +NULL NULL 395 val_395 +NULL NULL 395 val_395 +NULL NULL 394 val_394 +NULL NULL 393 val_393 +NULL NULL 392 val_392 +NULL NULL 389 val_389 +NULL NULL 386 val_386 +NULL NULL 384 val_384 +NULL NULL 384 val_384 +NULL NULL 384 val_384 +NULL NULL 382 val_382 +NULL NULL 382 val_382 +NULL NULL 379 val_379 +NULL NULL 378 val_378 +NULL NULL 377 val_377 +NULL NULL 375 val_375 +NULL NULL 374 val_374 +NULL NULL 373 val_373 +NULL NULL 37 val_37 +NULL NULL 37 val_37 +NULL NULL 368 val_368 +NULL NULL 367 val_367 +NULL NULL 367 
val_367 +NULL NULL 366 val_366 +NULL NULL 365 val_365 +NULL NULL 364 val_364 +NULL NULL 362 val_362 +NULL NULL 360 val_360 +NULL NULL 356 val_356 +NULL NULL 353 val_353 +NULL NULL 353 val_353 +NULL NULL 351 val_351 +NULL NULL 35 val_35 +NULL NULL 35 val_35 +NULL NULL 35 val_35 +NULL NULL 348 val_348 +NULL NULL 348 val_348 +NULL NULL 348 val_348 +NULL NULL 348 val_348 +NULL NULL 348 val_348 +NULL NULL 345 val_345 +NULL NULL 344 val_344 +NULL NULL 344 val_344 +NULL NULL 342 val_342 +NULL NULL 342 val_342 +NULL NULL 341 val_341 +NULL NULL 34 val_34 +NULL NULL 339 val_339 +NULL NULL 338 val_338 +NULL NULL 336 val_336 +NULL NULL 335 val_335 +NULL NULL 333 val_333 +NULL NULL 333 val_333 +NULL NULL 332 val_332 +NULL NULL 331 val_331 +NULL NULL 331 val_331 +NULL NULL 33 val_33 +NULL NULL 327 val_327 +NULL NULL 327 val_327 +NULL NULL 327 val_327 +NULL NULL 325 val_325 +NULL NULL 325 val_325 +NULL NULL 323 val_323 +NULL NULL 322 val_322 +NULL NULL 322 val_322 +NULL NULL 321 val_321 +NULL NULL 321 val_321 +NULL NULL 318 val_318 +NULL NULL 318 val_318 +NULL NULL 318 val_318 +NULL NULL 317 val_317 +NULL NULL 317 val_317 +NULL NULL 316 val_316 +NULL NULL 316 val_316 +NULL NULL 316 val_316 +NULL NULL 315 val_315 +NULL NULL 310 val_310 +NULL NULL 309 val_309 +NULL NULL 309 val_309 +NULL NULL 308 val_308 +NULL NULL 307 val_307 +NULL NULL 307 val_307 +NULL NULL 306 val_306 +NULL NULL 305 val_305 +NULL NULL 302 val_302 +NULL NULL 30 val_30 +NULL NULL 298 val_298 +NULL NULL 298 val_298 +NULL NULL 298 val_298 +NULL NULL 296 val_296 +NULL NULL 292 val_292 +NULL NULL 291 val_291 +NULL NULL 289 val_289 +NULL NULL 288 val_288 +NULL NULL 288 val_288 +NULL NULL 287 val_287 +NULL NULL 286 val_286 +NULL NULL 285 val_285 +NULL NULL 284 val_284 +NULL NULL 283 val_283 +NULL NULL 282 val_282 +NULL NULL 282 val_282 +NULL NULL 281 val_281 +NULL NULL 281 val_281 +NULL NULL 280 val_280 +NULL NULL 280 val_280 +NULL NULL 28 val_28 +NULL NULL 277 val_277 +NULL NULL 277 val_277 +NULL NULL 277 val_277 +NULL NULL 277 val_277 +NULL NULL 275 val_275 +NULL NULL 274 val_274 +NULL NULL 272 val_272 +NULL NULL 272 val_272 +NULL NULL 27 val_27 +NULL NULL 266 val_266 +NULL NULL 265 val_265 +NULL NULL 265 val_265 +NULL NULL 263 val_263 +NULL NULL 262 val_262 +NULL NULL 260 val_260 +NULL NULL 26 val_26 +NULL NULL 26 val_26 +NULL NULL 258 val_258 +NULL NULL 257 val_257 +NULL NULL 256 val_256 +NULL NULL 256 val_256 +NULL NULL 252 val_252 +NULL NULL 249 val_249 +NULL NULL 248 val_248 +NULL NULL 247 val_247 +NULL NULL 244 val_244 +NULL NULL 242 val_242 +NULL NULL 242 val_242 +NULL NULL 241 val_241 +NULL NULL 24 val_24 +NULL NULL 24 val_24 +NULL NULL 239 val_239 +NULL NULL 239 val_239 +NULL NULL 237 val_237 +NULL NULL 237 val_237 +NULL NULL 235 val_235 +NULL NULL 233 val_233 +NULL NULL 233 val_233 +NULL NULL 230 val_230 +NULL NULL 230 val_230 +NULL NULL 230 val_230 +NULL NULL 230 val_230 +NULL NULL 230 val_230 +NULL NULL 229 val_229 +NULL NULL 229 val_229 +NULL NULL 228 val_228 +NULL NULL 226 val_226 +NULL NULL 223 val_223 +NULL NULL 223 val_223 +NULL NULL 222 val_222 +NULL NULL 221 val_221 +NULL NULL 221 val_221 +NULL NULL 219 val_219 +NULL NULL 219 val_219 +NULL NULL 218 val_218 +NULL NULL 217 val_217 +NULL NULL 217 val_217 +NULL NULL 216 val_216 +NULL NULL 216 val_216 +NULL NULL 214 val_214 +NULL NULL 209 val_209 +NULL NULL 209 val_209 +NULL NULL 208 val_208 +NULL NULL 208 val_208 +NULL NULL 208 val_208 +NULL NULL 207 val_207 +NULL NULL 207 val_207 +NULL NULL 205 val_205 +NULL NULL 205 val_205 +NULL NULL 203 val_203 +NULL NULL 203 val_203 +NULL 
NULL 202 val_202 +NULL NULL 201 val_201 +NULL NULL 200 val_200 +NULL NULL 200 val_200 +NULL NULL 20 val_20 +NULL NULL 2 val_2 +NULL NULL 199 val_199 +NULL NULL 199 val_199 +NULL NULL 199 val_199 +NULL NULL 197 val_197 +NULL NULL 197 val_197 +NULL NULL 196 val_196 +NULL NULL 195 val_195 +NULL NULL 195 val_195 +NULL NULL 194 val_194 +NULL NULL 193 val_193 +NULL NULL 193 val_193 +NULL NULL 193 val_193 +NULL NULL 192 val_192 +NULL NULL 191 val_191 +NULL NULL 191 val_191 +NULL NULL 190 val_190 +NULL NULL 19 val_19 +NULL NULL 189 val_189 +NULL NULL 187 val_187 +NULL NULL 187 val_187 +NULL NULL 187 val_187 +NULL NULL 186 val_186 +NULL NULL 183 val_183 +NULL NULL 181 val_181 +NULL NULL 180 val_180 +NULL NULL 18 val_18 +NULL NULL 18 val_18 +NULL NULL 179 val_179 +NULL NULL 179 val_179 +NULL NULL 178 val_178 +NULL NULL 177 val_177 +NULL NULL 176 val_176 +NULL NULL 176 val_176 +NULL NULL 175 val_175 +NULL NULL 175 val_175 +NULL NULL 174 val_174 +NULL NULL 174 val_174 +NULL NULL 172 val_172 +NULL NULL 172 val_172 +NULL NULL 170 val_170 +NULL NULL 17 val_17 +NULL NULL 169 val_169 +NULL NULL 169 val_169 +NULL NULL 169 val_169 +NULL NULL 169 val_169 +NULL NULL 168 val_168 +NULL NULL 167 val_167 +NULL NULL 167 val_167 +NULL NULL 167 val_167 +NULL NULL 166 val_166 +NULL NULL 165 val_165 +NULL NULL 165 val_165 +NULL NULL 164 val_164 +NULL NULL 164 val_164 +NULL NULL 163 val_163 +NULL NULL 162 val_162 +NULL NULL 160 val_160 +NULL NULL 158 val_158 +NULL NULL 157 val_157 +NULL NULL 156 val_156 +NULL NULL 155 val_155 +NULL NULL 153 val_153 +NULL NULL 152 val_152 +NULL NULL 152 val_152 +NULL NULL 15 val_15 +NULL NULL 15 val_15 +NULL NULL 149 val_149 +NULL NULL 149 val_149 +NULL NULL 145 val_145 +NULL NULL 143 val_143 +NULL NULL 138 val_138 +NULL NULL 138 val_138 +NULL NULL 138 val_138 +NULL NULL 138 val_138 +NULL NULL 137 val_137 +NULL NULL 137 val_137 +NULL NULL 136 val_136 +NULL NULL 134 val_134 +NULL NULL 134 val_134 +NULL NULL 133 val_133 +NULL NULL 131 val_131 +NULL NULL 129 val_129 +NULL NULL 129 val_129 +NULL NULL 126 val_126 +NULL NULL 125 val_125 +NULL NULL 125 val_125 +NULL NULL 120 val_120 +NULL NULL 120 val_120 +NULL NULL 12 val_12 +NULL NULL 12 val_12 +NULL NULL 119 val_119 +NULL NULL 119 val_119 +NULL NULL 119 val_119 +NULL NULL 118 val_118 +NULL NULL 118 val_118 +NULL NULL 116 val_116 +NULL NULL 114 val_114 +NULL NULL 113 val_113 +NULL NULL 113 val_113 +NULL NULL 111 val_111 +NULL NULL 11 val_11 +NULL NULL 105 val_105 +NULL NULL 104 val_104 +NULL NULL 104 val_104 +NULL NULL 103 val_103 +NULL NULL 103 val_103 +NULL NULL 100 val_100 +NULL NULL 100 val_100 +NULL NULL 10 val_10 +NULL NULL 0 val_0 +NULL NULL 0 val_0 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +146 val_146 146 val_146 +146 val_146 146 val_146 +146 val_146 146 val_146 +146 val_146 146 val_146 +150 val_150 150 val_150 +213 val_213 213 val_213 +213 val_213 213 val_213 +213 val_213 213 val_213 +213 val_213 213 val_213 +224 val_224 224 val_224 +224 val_224 224 val_224 +224 val_224 224 val_224 +224 val_224 224 val_224 +238 val_238 238 val_238 +238 val_238 238 val_238 +238 val_238 238 val_238 +238 val_238 238 val_238 +255 val_255 255 val_255 +255 val_255 255 val_255 +255 val_255 255 val_255 +255 val_255 255 val_255 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 
+273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +278 val_278 278 val_278 +278 val_278 278 val_278 +278 val_278 278 val_278 +278 val_278 278 val_278 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +66 val_66 66 val_66 +98 val_98 98 val_98 +98 val_98 98 val_98 +98 val_98 98 val_98 +98 val_98 98 val_98 +PREHOOK: query: select * from src1 a left outer join src b on (a.key = b.key) right outer join src c on (a.value = c.value) order by a.key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@src1 +#### A masked pattern was here #### +POSTHOOK: query: select * from src1 a left outer join src b on (a.key = b.key) right outer join src c on (a.value = c.value) order by a.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@src1 +#### A masked pattern was here #### +NULL NULL NULL NULL 0 val_0 +NULL NULL NULL NULL 97 val_97 +NULL NULL NULL NULL 97 val_97 +NULL NULL NULL NULL 96 val_96 +NULL NULL NULL NULL 95 val_95 +NULL NULL NULL NULL 95 val_95 +NULL NULL NULL NULL 92 val_92 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 9 val_9 +NULL NULL NULL NULL 87 val_87 +NULL NULL NULL NULL 86 val_86 +NULL NULL NULL NULL 85 val_85 +NULL NULL NULL NULL 84 val_84 +NULL NULL NULL NULL 84 val_84 +NULL NULL NULL NULL 83 val_83 +NULL NULL NULL NULL 83 val_83 +NULL NULL NULL NULL 82 val_82 +NULL NULL NULL NULL 80 val_80 +NULL NULL NULL NULL 8 val_8 +NULL NULL NULL NULL 78 val_78 +NULL NULL NULL NULL 77 val_77 +NULL NULL NULL NULL 76 val_76 +NULL NULL NULL NULL 76 val_76 +NULL NULL NULL NULL 74 val_74 +NULL NULL NULL NULL 72 val_72 +NULL NULL NULL NULL 72 val_72 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 69 val_69 +NULL NULL NULL NULL 67 val_67 +NULL NULL NULL NULL 67 val_67 +NULL NULL NULL NULL 65 val_65 +NULL NULL NULL NULL 64 val_64 +NULL NULL NULL NULL 58 val_58 +NULL NULL NULL NULL 58 val_58 +NULL NULL NULL NULL 57 val_57 +NULL NULL NULL NULL 54 val_54 +NULL NULL NULL NULL 53 val_53 +NULL NULL 
NULL NULL 51 val_51 +NULL NULL NULL NULL 51 val_51 +NULL NULL NULL NULL 5 val_5 +NULL NULL NULL NULL 5 val_5 +NULL NULL NULL NULL 5 val_5 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 497 val_497 +NULL NULL NULL NULL 496 val_496 +NULL NULL NULL NULL 495 val_495 +NULL NULL NULL NULL 494 val_494 +NULL NULL NULL NULL 493 val_493 +NULL NULL NULL NULL 492 val_492 +NULL NULL NULL NULL 492 val_492 +NULL NULL NULL NULL 491 val_491 +NULL NULL NULL NULL 490 val_490 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 487 val_487 +NULL NULL NULL NULL 485 val_485 +NULL NULL NULL NULL 483 val_483 +NULL NULL NULL NULL 482 val_482 +NULL NULL NULL NULL 481 val_481 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 479 val_479 +NULL NULL NULL NULL 478 val_478 +NULL NULL NULL NULL 478 val_478 +NULL NULL NULL NULL 477 val_477 +NULL NULL NULL NULL 475 val_475 +NULL NULL NULL NULL 472 val_472 +NULL NULL NULL NULL 470 val_470 +NULL NULL NULL NULL 47 val_47 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 467 val_467 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 463 val_463 +NULL NULL NULL NULL 463 val_463 +NULL NULL NULL NULL 462 val_462 +NULL NULL NULL NULL 462 val_462 +NULL NULL NULL NULL 460 val_460 +NULL NULL NULL NULL 459 val_459 +NULL NULL NULL NULL 459 val_459 +NULL NULL NULL NULL 458 val_458 +NULL NULL NULL NULL 458 val_458 +NULL NULL NULL NULL 457 val_457 +NULL NULL NULL NULL 455 val_455 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 453 val_453 +NULL NULL NULL NULL 452 val_452 +NULL NULL NULL NULL 449 val_449 +NULL NULL NULL NULL 448 val_448 +NULL NULL NULL NULL 446 val_446 +NULL NULL NULL NULL 444 val_444 +NULL NULL NULL NULL 443 val_443 +NULL NULL NULL NULL 44 val_44 +NULL NULL NULL NULL 439 val_439 +NULL NULL NULL NULL 439 val_439 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 437 val_437 +NULL NULL NULL NULL 436 val_436 +NULL NULL NULL NULL 435 val_435 +NULL NULL NULL NULL 432 val_432 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 43 val_43 +NULL NULL NULL NULL 429 val_429 +NULL NULL NULL NULL 429 val_429 +NULL NULL NULL NULL 427 val_427 +NULL NULL NULL NULL 424 val_424 +NULL NULL NULL NULL 424 val_424 +NULL NULL NULL NULL 421 val_421 +NULL NULL NULL NULL 42 val_42 +NULL NULL NULL NULL 42 val_42 +NULL NULL NULL NULL 419 val_419 +NULL NULL NULL NULL 418 val_418 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 414 val_414 +NULL NULL NULL NULL 414 val_414 +NULL NULL NULL NULL 413 val_413 +NULL NULL NULL NULL 413 val_413 +NULL NULL NULL NULL 411 val_411 +NULL NULL NULL NULL 41 val_41 +NULL NULL NULL NULL 407 val_407 +NULL NULL NULL NULL 404 
val_404 +NULL NULL NULL NULL 404 val_404 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 402 val_402 +NULL NULL NULL NULL 400 val_400 +NULL NULL NULL NULL 4 val_4 +NULL NULL NULL NULL 399 val_399 +NULL NULL NULL NULL 399 val_399 +NULL NULL NULL NULL 397 val_397 +NULL NULL NULL NULL 397 val_397 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 395 val_395 +NULL NULL NULL NULL 395 val_395 +NULL NULL NULL NULL 394 val_394 +NULL NULL NULL NULL 393 val_393 +NULL NULL NULL NULL 392 val_392 +NULL NULL NULL NULL 389 val_389 +NULL NULL NULL NULL 386 val_386 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 382 val_382 +NULL NULL NULL NULL 382 val_382 +NULL NULL NULL NULL 379 val_379 +NULL NULL NULL NULL 378 val_378 +NULL NULL NULL NULL 377 val_377 +NULL NULL NULL NULL 375 val_375 +NULL NULL NULL NULL 374 val_374 +NULL NULL NULL NULL 373 val_373 +NULL NULL NULL NULL 37 val_37 +NULL NULL NULL NULL 37 val_37 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 368 val_368 +NULL NULL NULL NULL 367 val_367 +NULL NULL NULL NULL 367 val_367 +NULL NULL NULL NULL 366 val_366 +NULL NULL NULL NULL 365 val_365 +NULL NULL NULL NULL 364 val_364 +NULL NULL NULL NULL 362 val_362 +NULL NULL NULL NULL 360 val_360 +NULL NULL NULL NULL 356 val_356 +NULL NULL NULL NULL 353 val_353 +NULL NULL NULL NULL 353 val_353 +NULL NULL NULL NULL 351 val_351 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 345 val_345 +NULL NULL NULL NULL 344 val_344 +NULL NULL NULL NULL 344 val_344 +NULL NULL NULL NULL 342 val_342 +NULL NULL NULL NULL 342 val_342 +NULL NULL NULL NULL 341 val_341 +NULL NULL NULL NULL 34 val_34 +NULL NULL NULL NULL 339 val_339 +NULL NULL NULL NULL 338 val_338 +NULL NULL NULL NULL 336 val_336 +NULL NULL NULL NULL 335 val_335 +NULL NULL NULL NULL 333 val_333 +NULL NULL NULL NULL 333 val_333 +NULL NULL NULL NULL 332 val_332 +NULL NULL NULL NULL 331 val_331 +NULL NULL NULL NULL 331 val_331 +NULL NULL NULL NULL 33 val_33 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 325 val_325 +NULL NULL NULL NULL 325 val_325 +NULL NULL NULL NULL 323 val_323 +NULL NULL NULL NULL 322 val_322 +NULL NULL NULL NULL 322 val_322 +NULL NULL NULL NULL 321 val_321 +NULL NULL NULL NULL 321 val_321 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 317 val_317 +NULL NULL NULL NULL 317 val_317 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 315 val_315 +NULL NULL NULL NULL 310 val_310 +NULL NULL NULL NULL 309 val_309 +NULL NULL NULL NULL 309 val_309 +NULL NULL NULL NULL 308 val_308 +NULL NULL NULL NULL 307 val_307 +NULL NULL NULL NULL 307 val_307 +NULL NULL NULL NULL 306 val_306 +NULL NULL NULL NULL 305 val_305 +NULL NULL NULL NULL 302 val_302 +NULL NULL NULL NULL 30 val_30 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 296 val_296 +NULL NULL NULL NULL 292 val_292 
+NULL NULL NULL NULL 291 val_291 +NULL NULL NULL NULL 289 val_289 +NULL NULL NULL NULL 288 val_288 +NULL NULL NULL NULL 288 val_288 +NULL NULL NULL NULL 287 val_287 +NULL NULL NULL NULL 286 val_286 +NULL NULL NULL NULL 285 val_285 +NULL NULL NULL NULL 284 val_284 +NULL NULL NULL NULL 283 val_283 +NULL NULL NULL NULL 282 val_282 +NULL NULL NULL NULL 282 val_282 +NULL NULL NULL NULL 281 val_281 +NULL NULL NULL NULL 281 val_281 +NULL NULL NULL NULL 280 val_280 +NULL NULL NULL NULL 280 val_280 +NULL NULL NULL NULL 28 val_28 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 275 val_275 +NULL NULL NULL NULL 274 val_274 +NULL NULL NULL NULL 272 val_272 +NULL NULL NULL NULL 272 val_272 +NULL NULL NULL NULL 266 val_266 +NULL NULL NULL NULL 263 val_263 +NULL NULL NULL NULL 262 val_262 +NULL NULL NULL NULL 260 val_260 +NULL NULL NULL NULL 26 val_26 +NULL NULL NULL NULL 26 val_26 +NULL NULL NULL NULL 258 val_258 +NULL NULL NULL NULL 257 val_257 +NULL NULL NULL NULL 256 val_256 +NULL NULL NULL NULL 256 val_256 +NULL NULL NULL NULL 252 val_252 +NULL NULL NULL NULL 249 val_249 +NULL NULL NULL NULL 248 val_248 +NULL NULL NULL NULL 247 val_247 +NULL NULL NULL NULL 244 val_244 +NULL NULL NULL NULL 242 val_242 +NULL NULL NULL NULL 242 val_242 +NULL NULL NULL NULL 241 val_241 +NULL NULL NULL NULL 24 val_24 +NULL NULL NULL NULL 24 val_24 +NULL NULL NULL NULL 239 val_239 +NULL NULL NULL NULL 239 val_239 +NULL NULL NULL NULL 237 val_237 +NULL NULL NULL NULL 237 val_237 +NULL NULL NULL NULL 235 val_235 +NULL NULL NULL NULL 233 val_233 +NULL NULL NULL NULL 233 val_233 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 229 val_229 +NULL NULL NULL NULL 229 val_229 +NULL NULL NULL NULL 228 val_228 +NULL NULL NULL NULL 226 val_226 +NULL NULL NULL NULL 224 val_224 +NULL NULL NULL NULL 224 val_224 +NULL NULL NULL NULL 223 val_223 +NULL NULL NULL NULL 223 val_223 +NULL NULL NULL NULL 222 val_222 +NULL NULL NULL NULL 221 val_221 +NULL NULL NULL NULL 221 val_221 +NULL NULL NULL NULL 219 val_219 +NULL NULL NULL NULL 219 val_219 +NULL NULL NULL NULL 218 val_218 +NULL NULL NULL NULL 217 val_217 +NULL NULL NULL NULL 217 val_217 +NULL NULL NULL NULL 216 val_216 +NULL NULL NULL NULL 216 val_216 +NULL NULL NULL NULL 214 val_214 +NULL NULL NULL NULL 209 val_209 +NULL NULL NULL NULL 209 val_209 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 207 val_207 +NULL NULL NULL NULL 207 val_207 +NULL NULL NULL NULL 205 val_205 +NULL NULL NULL NULL 205 val_205 +NULL NULL NULL NULL 203 val_203 +NULL NULL NULL NULL 203 val_203 +NULL NULL NULL NULL 202 val_202 +NULL NULL NULL NULL 201 val_201 +NULL NULL NULL NULL 200 val_200 +NULL NULL NULL NULL 200 val_200 +NULL NULL NULL NULL 20 val_20 +NULL NULL NULL NULL 2 val_2 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 197 val_197 +NULL NULL NULL NULL 197 val_197 +NULL NULL NULL NULL 196 val_196 +NULL NULL NULL NULL 195 val_195 +NULL NULL NULL NULL 195 val_195 +NULL NULL NULL NULL 194 val_194 +NULL NULL NULL NULL 192 val_192 +NULL NULL NULL NULL 191 val_191 +NULL NULL NULL NULL 191 val_191 +NULL NULL NULL NULL 190 val_190 +NULL NULL NULL NULL 19 val_19 +NULL NULL NULL NULL 189 val_189 +NULL NULL NULL NULL 187 val_187 +NULL 
NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 186 val_186 +NULL NULL NULL NULL 183 val_183 +NULL NULL NULL NULL 181 val_181 +NULL NULL NULL NULL 180 val_180 +NULL NULL NULL NULL 18 val_18 +NULL NULL NULL NULL 18 val_18 +NULL NULL NULL NULL 179 val_179 +NULL NULL NULL NULL 179 val_179 +NULL NULL NULL NULL 178 val_178 +NULL NULL NULL NULL 177 val_177 +NULL NULL NULL NULL 176 val_176 +NULL NULL NULL NULL 176 val_176 +NULL NULL NULL NULL 175 val_175 +NULL NULL NULL NULL 175 val_175 +NULL NULL NULL NULL 174 val_174 +NULL NULL NULL NULL 174 val_174 +NULL NULL NULL NULL 172 val_172 +NULL NULL NULL NULL 172 val_172 +NULL NULL NULL NULL 170 val_170 +NULL NULL NULL NULL 17 val_17 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 168 val_168 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 166 val_166 +NULL NULL NULL NULL 164 val_164 +NULL NULL NULL NULL 164 val_164 +NULL NULL NULL NULL 163 val_163 +NULL NULL NULL NULL 162 val_162 +NULL NULL NULL NULL 160 val_160 +NULL NULL NULL NULL 158 val_158 +NULL NULL NULL NULL 157 val_157 +NULL NULL NULL NULL 156 val_156 +NULL NULL NULL NULL 155 val_155 +NULL NULL NULL NULL 153 val_153 +NULL NULL NULL NULL 152 val_152 +NULL NULL NULL NULL 152 val_152 +NULL NULL NULL NULL 15 val_15 +NULL NULL NULL NULL 15 val_15 +NULL NULL NULL NULL 149 val_149 +NULL NULL NULL NULL 149 val_149 +NULL NULL NULL NULL 145 val_145 +NULL NULL NULL NULL 143 val_143 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 137 val_137 +NULL NULL NULL NULL 137 val_137 +NULL NULL NULL NULL 136 val_136 +NULL NULL NULL NULL 134 val_134 +NULL NULL NULL NULL 134 val_134 +NULL NULL NULL NULL 133 val_133 +NULL NULL NULL NULL 131 val_131 +NULL NULL NULL NULL 129 val_129 +NULL NULL NULL NULL 129 val_129 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 126 val_126 +NULL NULL NULL NULL 125 val_125 +NULL NULL NULL NULL 125 val_125 +NULL NULL NULL NULL 120 val_120 +NULL NULL NULL NULL 120 val_120 +NULL NULL NULL NULL 12 val_12 +NULL NULL NULL NULL 12 val_12 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 118 val_118 +NULL NULL NULL NULL 118 val_118 +NULL NULL NULL NULL 116 val_116 +NULL NULL NULL NULL 114 val_114 +NULL NULL NULL NULL 113 val_113 +NULL NULL NULL NULL 113 val_113 +NULL NULL NULL NULL 111 val_111 +NULL NULL NULL NULL 11 val_11 +NULL NULL NULL NULL 105 val_105 +NULL NULL NULL NULL 104 val_104 +NULL NULL NULL NULL 104 val_104 +NULL NULL NULL NULL 103 val_103 +NULL NULL NULL NULL 103 val_103 +NULL NULL NULL NULL 100 val_100 +NULL NULL NULL NULL 100 val_100 +NULL NULL NULL NULL 10 val_10 +NULL NULL NULL NULL 0 val_0 +NULL NULL NULL NULL 0 val_0 + val_409 NULL NULL 409 val_409 + val_165 NULL NULL 165 val_165 + val_165 NULL NULL 165 val_165 + val_193 NULL NULL 193 val_193 + val_484 NULL NULL 484 val_484 + val_409 NULL NULL 409 val_409 + val_265 NULL NULL 265 val_265 + val_265 NULL NULL 265 val_265 + val_27 NULL NULL 27 val_27 + val_193 NULL NULL 193 val_193 + val_193 NULL NULL 193 val_193 + val_409 NULL NULL 409 val_409 +146 val_146 146 val_146 146 val_146 +146 val_146 146 val_146 146 val_146 +146 val_146 146 val_146 146 val_146 +146 val_146 146 val_146 
146 val_146 +150 val_150 150 val_150 150 val_150 +213 val_213 213 val_213 213 val_213 +213 val_213 213 val_213 213 val_213 +213 val_213 213 val_213 213 val_213 +213 val_213 213 val_213 213 val_213 +238 val_238 238 val_238 238 val_238 +238 val_238 238 val_238 238 val_238 +238 val_238 238 val_238 238 val_238 +238 val_238 238 val_238 238 val_238 +255 val_255 255 val_255 255 val_255 +255 val_255 255 val_255 255 val_255 +255 val_255 255 val_255 255 val_255 +255 val_255 255 val_255 255 val_255 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +278 val_278 278 val_278 278 val_278 +278 val_278 278 val_278 278 val_278 +278 val_278 278 val_278 278 val_278 +278 val_278 278 val_278 278 val_278 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +66 val_66 66 val_66 66 val_66 +98 val_98 98 val_98 98 val_98 +98 val_98 98 val_98 98 val_98 +98 val_98 98 val_98 98 val_98 +98 val_98 98 val_98 98 val_98 +PREHOOK: query: select * from src1 a left outer join src b on (a.key = b.key) left outer join src c on (a.value = c.value) order by a.key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@src1 +#### A masked pattern was here #### +POSTHOOK: query: select * from src1 a left outer join src b on (a.key = b.key) left outer join src c on (a.value = c.value) order by a.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@src1 +#### A masked pattern was here #### + val_27 NULL NULL 27 val_27 + NULL NULL NULL NULL + NULL 
NULL NULL NULL + val_484 NULL NULL 484 val_484 + val_409 NULL NULL 409 val_409 + val_409 NULL NULL 409 val_409 + val_409 NULL NULL 409 val_409 + NULL NULL NULL NULL + NULL NULL NULL NULL + val_165 NULL NULL 165 val_165 + val_165 NULL NULL 165 val_165 + val_193 NULL NULL 193 val_193 + val_193 NULL NULL 193 val_193 + val_193 NULL NULL 193 val_193 + val_265 NULL NULL 265 val_265 + val_265 NULL NULL 265 val_265 +128 128 val_128 NULL NULL +128 128 val_128 NULL NULL +128 128 val_128 NULL NULL +146 val_146 146 val_146 146 val_146 +146 val_146 146 val_146 146 val_146 +146 val_146 146 val_146 146 val_146 +146 val_146 146 val_146 146 val_146 +150 val_150 150 val_150 150 val_150 +213 val_213 213 val_213 213 val_213 +213 val_213 213 val_213 213 val_213 +213 val_213 213 val_213 213 val_213 +213 val_213 213 val_213 213 val_213 +224 224 val_224 NULL NULL +224 224 val_224 NULL NULL +238 val_238 238 val_238 238 val_238 +238 val_238 238 val_238 238 val_238 +238 val_238 238 val_238 238 val_238 +238 val_238 238 val_238 238 val_238 +255 val_255 255 val_255 255 val_255 +255 val_255 255 val_255 255 val_255 +255 val_255 255 val_255 255 val_255 +255 val_255 255 val_255 255 val_255 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +278 val_278 278 val_278 278 val_278 +278 val_278 278 val_278 278 val_278 +278 val_278 278 val_278 278 val_278 +278 val_278 278 val_278 278 val_278 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +369 369 val_369 NULL NULL +369 369 val_369 NULL NULL +369 369 val_369 NULL NULL +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 
val_406 406 val_406 406 val_406 +66 val_66 66 val_66 66 val_66 +98 val_98 98 val_98 98 val_98 +98 val_98 98 val_98 98 val_98 +98 val_98 98 val_98 98 val_98 +98 val_98 98 val_98 98 val_98 +PREHOOK: query: select * from src1 a left outer join src b on (a.key = b.key) join src c on (a.key = c.key) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@src1 +#### A masked pattern was here #### +POSTHOOK: query: select * from src1 a left outer join src b on (a.key = b.key) join src c on (a.key = c.key) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@src1 +#### A masked pattern was here #### +128 128 val_128 128 val_128 +128 128 val_128 128 val_128 +128 128 val_128 128 val_128 +128 128 val_128 128 val_128 +128 128 val_128 128 val_128 +128 128 val_128 128 val_128 +128 128 val_128 128 val_128 +128 128 val_128 128 val_128 +128 128 val_128 128 val_128 +146 val_146 146 val_146 146 val_146 +146 val_146 146 val_146 146 val_146 +146 val_146 146 val_146 146 val_146 +146 val_146 146 val_146 146 val_146 +150 val_150 150 val_150 150 val_150 +213 val_213 213 val_213 213 val_213 +213 val_213 213 val_213 213 val_213 +213 val_213 213 val_213 213 val_213 +213 val_213 213 val_213 213 val_213 +224 224 val_224 224 val_224 +224 224 val_224 224 val_224 +224 224 val_224 224 val_224 +224 224 val_224 224 val_224 +238 val_238 238 val_238 238 val_238 +238 val_238 238 val_238 238 val_238 +238 val_238 238 val_238 238 val_238 +238 val_238 238 val_238 238 val_238 +255 val_255 255 val_255 255 val_255 +255 val_255 255 val_255 255 val_255 +255 val_255 255 val_255 255 val_255 +255 val_255 255 val_255 255 val_255 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +278 val_278 278 val_278 278 val_278 +278 val_278 278 val_278 278 val_278 +278 val_278 278 val_278 278 val_278 +278 val_278 278 val_278 278 val_278 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +369 369 val_369 369 val_369 +369 369 val_369 369 val_369 +369 369 val_369 369 val_369 +369 369 val_369 369 val_369 +369 369 val_369 369 val_369 +369 369 val_369 369 val_369 +369 369 val_369 369 val_369 +369 369 val_369 369 val_369 +369 369 val_369 369 val_369 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 
401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +66 val_66 66 val_66 66 val_66 +98 val_98 98 val_98 98 val_98 +98 val_98 98 val_98 98 val_98 +98 val_98 98 val_98 98 val_98 +98 val_98 98 val_98 98 val_98 +PREHOOK: query: select * from src1 a join src b on (a.key = b.key) join src c on (a.key = c.key) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@src1 +#### A masked pattern was here #### +POSTHOOK: query: select * from src1 a join src b on (a.key = b.key) join src c on (a.key = c.key) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@src1 +#### A masked pattern was here #### +128 128 val_128 128 val_128 +128 128 val_128 128 val_128 +128 128 val_128 128 val_128 +128 128 val_128 128 val_128 +128 128 val_128 128 val_128 +128 128 val_128 128 val_128 +128 128 val_128 128 val_128 +128 128 val_128 128 val_128 +128 128 val_128 128 val_128 +146 val_146 146 val_146 146 val_146 +146 val_146 146 val_146 146 val_146 +146 val_146 146 val_146 146 val_146 +146 val_146 146 val_146 146 val_146 +150 val_150 150 val_150 150 val_150 +213 val_213 213 val_213 213 val_213 +213 val_213 213 val_213 213 val_213 +213 val_213 213 val_213 213 val_213 +213 val_213 213 val_213 213 val_213 +224 224 val_224 224 val_224 +224 224 val_224 224 val_224 +224 224 val_224 224 val_224 +224 224 val_224 224 val_224 +238 val_238 238 val_238 238 val_238 +238 val_238 238 val_238 238 val_238 +238 val_238 238 val_238 238 val_238 +238 val_238 238 val_238 238 val_238 +255 val_255 255 val_255 255 val_255 +255 val_255 255 val_255 255 val_255 +255 val_255 255 val_255 255 val_255 +255 val_255 255 val_255 255 val_255 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +273 val_273 273 val_273 273 val_273 +278 val_278 278 val_278 278 val_278 +278 val_278 278 val_278 278 val_278 +278 val_278 278 val_278 278 val_278 +278 val_278 278 val_278 278 val_278 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +311 val_311 311 val_311 311 val_311 +369 369 val_369 369 val_369 +369 369 val_369 369 val_369 +369 369 val_369 369 val_369 +369 369 val_369 369 val_369 +369 369 val_369 369 val_369 +369 369 val_369 369 val_369 +369 369 val_369 369 val_369 +369 369 val_369 369 val_369 +369 369 val_369 369 val_369 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 
val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +401 val_401 401 val_401 401 val_401 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +406 val_406 406 val_406 406 val_406 +66 val_66 66 val_66 66 val_66 +98 val_98 98 val_98 98 val_98 +98 val_98 98 val_98 98 val_98 +98 val_98 98 val_98 98 val_98 +98 val_98 98 val_98 98 val_98 +PREHOOK: query: select count(*) from src1 a join src b on (a.key = b.key) join src c on (a.key = c.key) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@src1 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from src1 a join src b on (a.key = b.key) join src c on (a.key = c.key) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@src1 +#### A masked pattern was here #### +107 Index: ql/src/test/results/clientpositive/tez/tez_joins_explain.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/tez_joins_explain.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/tez_joins_explain.q.out (working copy) @@ -0,0 +1,737 @@ +PREHOOK: query: explain +select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + (TOK_QUERY (TOK_FROM (TOK_RIGHTOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_LEFTOUTERJOIN (TOK_TABREF (TOK_TABNAME src1) a) (TOK_TABREF (TOK_TABNAME src) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL b) key))))) x) (TOK_TABREF (TOK_TABNAME src) c) (= (. (TOK_TABLE_OR_COL x) value) (. (TOK_TABLE_OR_COL c) value)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (. 
(TOK_TABLE_OR_COL x) key))))) + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Alias -> Map Operator Tree: + b + TableScan + alias: b + Reduce Output Operator + key expressions: + expr: key + type: string + sort order: + + Map-reduce partition columns: + expr: key + type: string + tag: 1 + value expressions: + expr: key + type: string + expr: value + type: string + Alias -> Map Operator Tree: + a + TableScan + alias: a + Reduce Output Operator + key expressions: + expr: key + type: string + sort order: + + Map-reduce partition columns: + expr: key + type: string + tag: 0 + Reduce Operator Tree: + Join Operator + condition map: + Left Outer Join0 to 1 + condition expressions: + 0 + 1 {VALUE._col0} {VALUE._col1} + handleSkewJoin: false + outputColumnNames: _col4, _col5 + Select Operator + expressions: + expr: _col4 + type: string + expr: _col5 + type: string + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Reduce Operator Tree: + Extract + Reduce Output Operator + key expressions: + expr: _col1 + type: string + sort order: + + Map-reduce partition columns: + expr: _col1 + type: string + tag: 0 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + Alias -> Map Operator Tree: + c + TableScan + alias: c + Reduce Output Operator + key expressions: + expr: value + type: string + sort order: + + Map-reduce partition columns: + expr: value + type: string + tag: 1 + value expressions: + expr: key + type: string + expr: value + type: string + Reduce Operator Tree: + Join Operator + condition map: + Right Outer Join0 to 1 + condition expressions: + 0 {VALUE._col0} {VALUE._col1} + 1 {VALUE._col0} {VALUE._col1} + handleSkewJoin: false + outputColumnNames: _col0, _col1, _col2, _col3 + Select Operator + expressions: + expr: _col0 + type: string + expr: _col1 + type: string + expr: _col2 + type: string + expr: _col3 + type: string + outputColumnNames: _col0, _col1, _col2, _col3 + Reduce Output Operator + key expressions: + expr: _col0 + type: string + sort order: + + tag: -1 + value expressions: + expr: _col0 + type: string + expr: _col1 + type: string + expr: _col2 + type: string + expr: _col3 + type: string + Reduce Operator Tree: + Extract + File Output Operator + compressed: false + GlobalTableId: 0 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@src1 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@src1 +#### A masked pattern was here #### +NULL NULL 0 val_0 +NULL NULL 97 val_97 +NULL NULL 97 val_97 +NULL NULL 96 val_96 +NULL NULL 95 val_95 +NULL NULL 95 val_95 +NULL NULL 92 val_92 +NULL NULL 90 val_90 +NULL NULL 90 val_90 +NULL NULL 90 
val_90 +NULL NULL 9 val_9 +NULL NULL 87 val_87 +NULL NULL 86 val_86 +NULL NULL 85 val_85 +NULL NULL 84 val_84 +NULL NULL 84 val_84 +NULL NULL 83 val_83 +NULL NULL 83 val_83 +NULL NULL 82 val_82 +NULL NULL 80 val_80 +NULL NULL 8 val_8 +NULL NULL 78 val_78 +NULL NULL 77 val_77 +NULL NULL 76 val_76 +NULL NULL 76 val_76 +NULL NULL 74 val_74 +NULL NULL 72 val_72 +NULL NULL 72 val_72 +NULL NULL 70 val_70 +NULL NULL 70 val_70 +NULL NULL 70 val_70 +NULL NULL 69 val_69 +NULL NULL 67 val_67 +NULL NULL 67 val_67 +NULL NULL 65 val_65 +NULL NULL 64 val_64 +NULL NULL 58 val_58 +NULL NULL 58 val_58 +NULL NULL 57 val_57 +NULL NULL 54 val_54 +NULL NULL 53 val_53 +NULL NULL 51 val_51 +NULL NULL 51 val_51 +NULL NULL 5 val_5 +NULL NULL 5 val_5 +NULL NULL 5 val_5 +NULL NULL 498 val_498 +NULL NULL 498 val_498 +NULL NULL 498 val_498 +NULL NULL 497 val_497 +NULL NULL 496 val_496 +NULL NULL 495 val_495 +NULL NULL 494 val_494 +NULL NULL 493 val_493 +NULL NULL 492 val_492 +NULL NULL 492 val_492 +NULL NULL 491 val_491 +NULL NULL 490 val_490 +NULL NULL 489 val_489 +NULL NULL 489 val_489 +NULL NULL 489 val_489 +NULL NULL 489 val_489 +NULL NULL 487 val_487 +NULL NULL 485 val_485 +NULL NULL 484 val_484 +NULL NULL 483 val_483 +NULL NULL 482 val_482 +NULL NULL 481 val_481 +NULL NULL 480 val_480 +NULL NULL 480 val_480 +NULL NULL 480 val_480 +NULL NULL 479 val_479 +NULL NULL 478 val_478 +NULL NULL 478 val_478 +NULL NULL 477 val_477 +NULL NULL 475 val_475 +NULL NULL 472 val_472 +NULL NULL 470 val_470 +NULL NULL 47 val_47 +NULL NULL 469 val_469 +NULL NULL 469 val_469 +NULL NULL 469 val_469 +NULL NULL 469 val_469 +NULL NULL 469 val_469 +NULL NULL 468 val_468 +NULL NULL 468 val_468 +NULL NULL 468 val_468 +NULL NULL 468 val_468 +NULL NULL 467 val_467 +NULL NULL 466 val_466 +NULL NULL 466 val_466 +NULL NULL 466 val_466 +NULL NULL 463 val_463 +NULL NULL 463 val_463 +NULL NULL 462 val_462 +NULL NULL 462 val_462 +NULL NULL 460 val_460 +NULL NULL 459 val_459 +NULL NULL 459 val_459 +NULL NULL 458 val_458 +NULL NULL 458 val_458 +NULL NULL 457 val_457 +NULL NULL 455 val_455 +NULL NULL 454 val_454 +NULL NULL 454 val_454 +NULL NULL 454 val_454 +NULL NULL 453 val_453 +NULL NULL 452 val_452 +NULL NULL 449 val_449 +NULL NULL 448 val_448 +NULL NULL 446 val_446 +NULL NULL 444 val_444 +NULL NULL 443 val_443 +NULL NULL 44 val_44 +NULL NULL 439 val_439 +NULL NULL 439 val_439 +NULL NULL 438 val_438 +NULL NULL 438 val_438 +NULL NULL 438 val_438 +NULL NULL 437 val_437 +NULL NULL 436 val_436 +NULL NULL 435 val_435 +NULL NULL 432 val_432 +NULL NULL 431 val_431 +NULL NULL 431 val_431 +NULL NULL 431 val_431 +NULL NULL 430 val_430 +NULL NULL 430 val_430 +NULL NULL 430 val_430 +NULL NULL 43 val_43 +NULL NULL 429 val_429 +NULL NULL 429 val_429 +NULL NULL 427 val_427 +NULL NULL 424 val_424 +NULL NULL 424 val_424 +NULL NULL 421 val_421 +NULL NULL 42 val_42 +NULL NULL 42 val_42 +NULL NULL 419 val_419 +NULL NULL 418 val_418 +NULL NULL 417 val_417 +NULL NULL 417 val_417 +NULL NULL 417 val_417 +NULL NULL 414 val_414 +NULL NULL 414 val_414 +NULL NULL 413 val_413 +NULL NULL 413 val_413 +NULL NULL 411 val_411 +NULL NULL 41 val_41 +NULL NULL 409 val_409 +NULL NULL 409 val_409 +NULL NULL 409 val_409 +NULL NULL 407 val_407 +NULL NULL 404 val_404 +NULL NULL 404 val_404 +NULL NULL 403 val_403 +NULL NULL 403 val_403 +NULL NULL 403 val_403 +NULL NULL 402 val_402 +NULL NULL 400 val_400 +NULL NULL 4 val_4 +NULL NULL 399 val_399 +NULL NULL 399 val_399 +NULL NULL 397 val_397 +NULL NULL 397 val_397 +NULL NULL 396 val_396 +NULL NULL 396 val_396 +NULL NULL 396 val_396 +NULL NULL 
395 val_395 +NULL NULL 395 val_395 +NULL NULL 394 val_394 +NULL NULL 393 val_393 +NULL NULL 392 val_392 +NULL NULL 389 val_389 +NULL NULL 386 val_386 +NULL NULL 384 val_384 +NULL NULL 384 val_384 +NULL NULL 384 val_384 +NULL NULL 382 val_382 +NULL NULL 382 val_382 +NULL NULL 379 val_379 +NULL NULL 378 val_378 +NULL NULL 377 val_377 +NULL NULL 375 val_375 +NULL NULL 374 val_374 +NULL NULL 373 val_373 +NULL NULL 37 val_37 +NULL NULL 37 val_37 +NULL NULL 368 val_368 +NULL NULL 367 val_367 +NULL NULL 367 val_367 +NULL NULL 366 val_366 +NULL NULL 365 val_365 +NULL NULL 364 val_364 +NULL NULL 362 val_362 +NULL NULL 360 val_360 +NULL NULL 356 val_356 +NULL NULL 353 val_353 +NULL NULL 353 val_353 +NULL NULL 351 val_351 +NULL NULL 35 val_35 +NULL NULL 35 val_35 +NULL NULL 35 val_35 +NULL NULL 348 val_348 +NULL NULL 348 val_348 +NULL NULL 348 val_348 +NULL NULL 348 val_348 +NULL NULL 348 val_348 +NULL NULL 345 val_345 +NULL NULL 344 val_344 +NULL NULL 344 val_344 +NULL NULL 342 val_342 +NULL NULL 342 val_342 +NULL NULL 341 val_341 +NULL NULL 34 val_34 +NULL NULL 339 val_339 +NULL NULL 338 val_338 +NULL NULL 336 val_336 +NULL NULL 335 val_335 +NULL NULL 333 val_333 +NULL NULL 333 val_333 +NULL NULL 332 val_332 +NULL NULL 331 val_331 +NULL NULL 331 val_331 +NULL NULL 33 val_33 +NULL NULL 327 val_327 +NULL NULL 327 val_327 +NULL NULL 327 val_327 +NULL NULL 325 val_325 +NULL NULL 325 val_325 +NULL NULL 323 val_323 +NULL NULL 322 val_322 +NULL NULL 322 val_322 +NULL NULL 321 val_321 +NULL NULL 321 val_321 +NULL NULL 318 val_318 +NULL NULL 318 val_318 +NULL NULL 318 val_318 +NULL NULL 317 val_317 +NULL NULL 317 val_317 +NULL NULL 316 val_316 +NULL NULL 316 val_316 +NULL NULL 316 val_316 +NULL NULL 315 val_315 +NULL NULL 310 val_310 +NULL NULL 309 val_309 +NULL NULL 309 val_309 +NULL NULL 308 val_308 +NULL NULL 307 val_307 +NULL NULL 307 val_307 +NULL NULL 306 val_306 +NULL NULL 305 val_305 +NULL NULL 302 val_302 +NULL NULL 30 val_30 +NULL NULL 298 val_298 +NULL NULL 298 val_298 +NULL NULL 298 val_298 +NULL NULL 296 val_296 +NULL NULL 292 val_292 +NULL NULL 291 val_291 +NULL NULL 289 val_289 +NULL NULL 288 val_288 +NULL NULL 288 val_288 +NULL NULL 287 val_287 +NULL NULL 286 val_286 +NULL NULL 285 val_285 +NULL NULL 284 val_284 +NULL NULL 283 val_283 +NULL NULL 282 val_282 +NULL NULL 282 val_282 +NULL NULL 281 val_281 +NULL NULL 281 val_281 +NULL NULL 280 val_280 +NULL NULL 280 val_280 +NULL NULL 28 val_28 +NULL NULL 277 val_277 +NULL NULL 277 val_277 +NULL NULL 277 val_277 +NULL NULL 277 val_277 +NULL NULL 275 val_275 +NULL NULL 274 val_274 +NULL NULL 272 val_272 +NULL NULL 272 val_272 +NULL NULL 27 val_27 +NULL NULL 266 val_266 +NULL NULL 265 val_265 +NULL NULL 265 val_265 +NULL NULL 263 val_263 +NULL NULL 262 val_262 +NULL NULL 260 val_260 +NULL NULL 26 val_26 +NULL NULL 26 val_26 +NULL NULL 258 val_258 +NULL NULL 257 val_257 +NULL NULL 256 val_256 +NULL NULL 256 val_256 +NULL NULL 252 val_252 +NULL NULL 249 val_249 +NULL NULL 248 val_248 +NULL NULL 247 val_247 +NULL NULL 244 val_244 +NULL NULL 242 val_242 +NULL NULL 242 val_242 +NULL NULL 241 val_241 +NULL NULL 24 val_24 +NULL NULL 24 val_24 +NULL NULL 239 val_239 +NULL NULL 239 val_239 +NULL NULL 237 val_237 +NULL NULL 237 val_237 +NULL NULL 235 val_235 +NULL NULL 233 val_233 +NULL NULL 233 val_233 +NULL NULL 230 val_230 +NULL NULL 230 val_230 +NULL NULL 230 val_230 +NULL NULL 230 val_230 +NULL NULL 230 val_230 +NULL NULL 229 val_229 +NULL NULL 229 val_229 +NULL NULL 228 val_228 +NULL NULL 226 val_226 +NULL NULL 223 val_223 +NULL NULL 223 val_223 +NULL 
NULL 222 val_222 +NULL NULL 221 val_221 +NULL NULL 221 val_221 +NULL NULL 219 val_219 +NULL NULL 219 val_219 +NULL NULL 218 val_218 +NULL NULL 217 val_217 +NULL NULL 217 val_217 +NULL NULL 216 val_216 +NULL NULL 216 val_216 +NULL NULL 214 val_214 +NULL NULL 209 val_209 +NULL NULL 209 val_209 +NULL NULL 208 val_208 +NULL NULL 208 val_208 +NULL NULL 208 val_208 +NULL NULL 207 val_207 +NULL NULL 207 val_207 +NULL NULL 205 val_205 +NULL NULL 205 val_205 +NULL NULL 203 val_203 +NULL NULL 203 val_203 +NULL NULL 202 val_202 +NULL NULL 201 val_201 +NULL NULL 200 val_200 +NULL NULL 200 val_200 +NULL NULL 20 val_20 +NULL NULL 2 val_2 +NULL NULL 199 val_199 +NULL NULL 199 val_199 +NULL NULL 199 val_199 +NULL NULL 197 val_197 +NULL NULL 197 val_197 +NULL NULL 196 val_196 +NULL NULL 195 val_195 +NULL NULL 195 val_195 +NULL NULL 194 val_194 +NULL NULL 193 val_193 +NULL NULL 193 val_193 +NULL NULL 193 val_193 +NULL NULL 192 val_192 +NULL NULL 191 val_191 +NULL NULL 191 val_191 +NULL NULL 190 val_190 +NULL NULL 19 val_19 +NULL NULL 189 val_189 +NULL NULL 187 val_187 +NULL NULL 187 val_187 +NULL NULL 187 val_187 +NULL NULL 186 val_186 +NULL NULL 183 val_183 +NULL NULL 181 val_181 +NULL NULL 180 val_180 +NULL NULL 18 val_18 +NULL NULL 18 val_18 +NULL NULL 179 val_179 +NULL NULL 179 val_179 +NULL NULL 178 val_178 +NULL NULL 177 val_177 +NULL NULL 176 val_176 +NULL NULL 176 val_176 +NULL NULL 175 val_175 +NULL NULL 175 val_175 +NULL NULL 174 val_174 +NULL NULL 174 val_174 +NULL NULL 172 val_172 +NULL NULL 172 val_172 +NULL NULL 170 val_170 +NULL NULL 17 val_17 +NULL NULL 169 val_169 +NULL NULL 169 val_169 +NULL NULL 169 val_169 +NULL NULL 169 val_169 +NULL NULL 168 val_168 +NULL NULL 167 val_167 +NULL NULL 167 val_167 +NULL NULL 167 val_167 +NULL NULL 166 val_166 +NULL NULL 165 val_165 +NULL NULL 165 val_165 +NULL NULL 164 val_164 +NULL NULL 164 val_164 +NULL NULL 163 val_163 +NULL NULL 162 val_162 +NULL NULL 160 val_160 +NULL NULL 158 val_158 +NULL NULL 157 val_157 +NULL NULL 156 val_156 +NULL NULL 155 val_155 +NULL NULL 153 val_153 +NULL NULL 152 val_152 +NULL NULL 152 val_152 +NULL NULL 15 val_15 +NULL NULL 15 val_15 +NULL NULL 149 val_149 +NULL NULL 149 val_149 +NULL NULL 145 val_145 +NULL NULL 143 val_143 +NULL NULL 138 val_138 +NULL NULL 138 val_138 +NULL NULL 138 val_138 +NULL NULL 138 val_138 +NULL NULL 137 val_137 +NULL NULL 137 val_137 +NULL NULL 136 val_136 +NULL NULL 134 val_134 +NULL NULL 134 val_134 +NULL NULL 133 val_133 +NULL NULL 131 val_131 +NULL NULL 129 val_129 +NULL NULL 129 val_129 +NULL NULL 126 val_126 +NULL NULL 125 val_125 +NULL NULL 125 val_125 +NULL NULL 120 val_120 +NULL NULL 120 val_120 +NULL NULL 12 val_12 +NULL NULL 12 val_12 +NULL NULL 119 val_119 +NULL NULL 119 val_119 +NULL NULL 119 val_119 +NULL NULL 118 val_118 +NULL NULL 118 val_118 +NULL NULL 116 val_116 +NULL NULL 114 val_114 +NULL NULL 113 val_113 +NULL NULL 113 val_113 +NULL NULL 111 val_111 +NULL NULL 11 val_11 +NULL NULL 105 val_105 +NULL NULL 104 val_104 +NULL NULL 104 val_104 +NULL NULL 103 val_103 +NULL NULL 103 val_103 +NULL NULL 100 val_100 +NULL NULL 100 val_100 +NULL NULL 10 val_10 +NULL NULL 0 val_0 +NULL NULL 0 val_0 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +146 val_146 146 val_146 +146 val_146 146 val_146 +146 val_146 146 val_146 +146 val_146 146 val_146 +150 val_150 150 val_150 +213 val_213 213 val_213 +213 val_213 213 
val_213 +213 val_213 213 val_213 +213 val_213 213 val_213 +224 val_224 224 val_224 +224 val_224 224 val_224 +224 val_224 224 val_224 +224 val_224 224 val_224 +238 val_238 238 val_238 +238 val_238 238 val_238 +238 val_238 238 val_238 +238 val_238 238 val_238 +255 val_255 255 val_255 +255 val_255 255 val_255 +255 val_255 255 val_255 +255 val_255 255 val_255 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +278 val_278 278 val_278 +278 val_278 278 val_278 +278 val_278 278 val_278 +278 val_278 278 val_278 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +66 val_66 66 val_66 +98 val_98 98 val_98 +98 val_98 98 val_98 +98 val_98 98 val_98 +98 val_98 98 val_98 Index: ql/src/test/results/clientpositive/tez/vectorization_15.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/vectorization_15.q.out (revision 0) +++ ql/src/test/results/clientpositive/tez/vectorization_15.q.out (working copy) @@ -0,0 +1,109 @@ +PREHOOK: query: SELECT cfloat, + cboolean1, + cdouble, + cstring1, + ctinyint, + cint, + ctimestamp1, + STDDEV_SAMP(cfloat), + (-26.28 - cint), + MIN(cdouble), + (cdouble * 79.553), + (33 % cfloat), + STDDEV_SAMP(ctinyint), + VAR_POP(ctinyint), + (-23 % cdouble), + (-(ctinyint)), + VAR_SAMP(cint), + (cint - cfloat), + (-23 % ctinyint), + (-((-26.28 - cint))), + STDDEV_POP(cint) +FROM alltypesorc +WHERE (((cstring2 LIKE '%ss%') + OR (cstring1 LIKE '10%')) + OR ((cint >= -75) + AND ((ctinyint = csmallint) + AND (cdouble >= -3728)))) +GROUP BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT cfloat, + cboolean1, + cdouble, + cstring1, + ctinyint, + cint, + ctimestamp1, + STDDEV_SAMP(cfloat), + (-26.28 - cint), + MIN(cdouble), + (cdouble * 79.553), + (33 % cfloat), + STDDEV_SAMP(ctinyint), + VAR_POP(ctinyint), + (-23 % cdouble), 
+ (-(ctinyint)), + VAR_SAMP(cint), + (cint - cfloat), + (-23 % ctinyint), + (-((-26.28 - cint))), + STDDEV_POP(cint) +FROM alltypesorc +WHERE (((cstring2 LIKE '%ss%') + OR (cstring1 LIKE '10%')) + OR ((cint >= -75) + AND ((ctinyint = csmallint) + AND (cdouble >= -3728)))) +GROUP BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +NULL false -13008.0 10 NULL -896629175 NULL NULL 8.9662914872E8 -13008.0 -1034825.424 NULL NULL NULL -23.0 NULL 0.0 NULL NULL -8.9662914872E8 0.0 +NULL false -7707.0 10vke853 NULL 424959354 NULL NULL -4.2495938028E8 -7707.0 -613114.971 NULL NULL NULL -23.0 NULL 0.0 NULL NULL 4.2495938028E8 0.0 +NULL false 2459.0 10 NULL -394064473 NULL NULL 3.9406444672E8 2459.0 195620.827 NULL NULL NULL -23.0 NULL 0.0 NULL NULL -3.9406444672E8 0.0 +NULL false 14771.0 10 NULL 993788576 NULL NULL -9.9378860228E8 14771.0 1175077.363 NULL NULL NULL -23.0 NULL 0.0 NULL NULL 9.9378860228E8 0.0 +NULL true -14674.0 105aFDAt30c4rI4U NULL -831072496 NULL NULL 8.3107246972E8 -14674.0 -1167360.722 NULL NULL NULL -23.0 NULL 0.0 NULL NULL -8.3107246972E8 0.0 +NULL true -14640.0 101n6n461o NULL -819657767 NULL NULL 8.1965774072E8 -14640.0 -1164655.92 NULL NULL NULL -23.0 NULL 0.0 NULL NULL -8.1965774072E8 0.0 +NULL true -9489.0 10Yr6 NULL 500274721 NULL NULL -5.0027474728E8 -9489.0 -754878.417 NULL NULL NULL -23.0 NULL 0.0 NULL NULL 5.0027474728E8 0.0 +NULL true -7238.0 10ljXCFT6fG6Qi3S7414e NULL -256767096 NULL NULL 2.5676706972E8 -7238.0 -575804.614 NULL NULL NULL -23.0 NULL 0.0 NULL NULL -2.5676706972E8 0.0 +NULL true 2261.0 10Hr5oB07Ohu0622u NULL 612000160 NULL NULL -6.1200018628E8 2261.0 179869.33299999998 NULL NULL NULL -23.0 NULL 0.0 NULL NULL 6.1200018628E8 0.0 +NULL true 4516.0 10lL0XD6WP2x64f70N0fHmC1 NULL -974538365 NULL NULL 9.7453833872E8 4516.0 359261.348 NULL NULL NULL -23.0 NULL 0.0 NULL NULL -9.7453833872E8 0.0 +NULL true 4767.0 10dUdwyXp5XwgpkTxLffmv3x NULL 187503456 NULL NULL -1.8750348228E8 4767.0 379229.151 NULL NULL NULL -23.0 NULL 0.0 NULL NULL 1.8750348228E8 0.0 +NULL true 5658.0 10M3eGUsKVonbl70DyoCk25 NULL 486382507 NULL NULL -4.8638253328E8 5658.0 450110.874 NULL NULL NULL -23.0 NULL 0.0 NULL NULL 4.8638253328E8 0.0 +NULL true 8659.0 1047piRsT3c3r134I NULL 487236176 NULL NULL -4.8723620228E8 8659.0 688849.427 NULL NULL NULL -23.0 NULL 0.0 NULL NULL 4.8723620228E8 0.0 +NULL true 10419.0 10 NULL -721614386 NULL NULL 7.2161435972E8 10419.0 828862.7069999999 NULL NULL NULL -23.0 NULL 0.0 NULL NULL -7.2161435972E8 0.0 +NULL true 14519.0 100xJdkyc NULL 729277608 NULL NULL -7.2927763428E8 14519.0 1155030.007 NULL NULL NULL -23.0 NULL 0.0 NULL NULL 7.2927763428E8 0.0 +-62.0 NULL 15601.0 NULL -62 NULL 1969-12-31 16:00:09.889 0.0 NULL 15601.0 1241106.353 33.0 0.0 0.0 -23.0 62 NULL NULL -23 NULL NULL +-51.0 NULL -200.0 NULL -51 NULL 1969-12-31 15:59:55.423 0.0 NULL -200.0 -15910.599999999999 33.0 0.0 0.0 -23.0 51 NULL NULL -23 NULL NULL +-51.0 false NULL 10 -51 1058319346 1969-12-31 16:00:08.451 0.0 -1.05831937228E9 NULL NULL 33.0 0.0 0.0 NULL 51 0.0 1.058319397E9 -23 1.05831937228E9 0.0 +-51.0 false NULL 10TYIE5S35U6dj3N -51 -469581869 1969-12-31 16:00:08.451 0.0 4.6958184272E8 NULL NULL 33.0 0.0 0.0 NULL 51 0.0 -4.69581818E8 -23 -4.6958184272E8 0.0 +-51.0 false NULL 1Lh6Uoq3WhNtOqQHu7WN7U -51 -352637533 1969-12-31 16:00:08.451 0.0 3.5263750672E8 NULL NULL 33.0 0.0 0.0 NULL 51 0.0 -3.52637482E8 -23 -3.5263750672E8 0.0 +-51.0 true NULL 04Y1mA17 -51 -114647521 
1969-12-31 16:00:08.451 0.0 1.1464749472E8 NULL NULL 33.0 0.0 0.0 NULL 51 0.0 -1.1464747E8 -23 -1.1464749472E8 0.0 +-51.0 true NULL 10Wu570aLPO0p02P17FeH -51 405338893 1969-12-31 16:00:08.451 0.0 -4.0533891928E8 NULL NULL 33.0 0.0 0.0 NULL 51 0.0 4.05338944E8 -23 4.0533891928E8 0.0 +-51.0 true NULL 3cQp060 -51 -226923315 1969-12-31 16:00:08.451 0.0 2.2692328872E8 NULL NULL 33.0 0.0 0.0 NULL 51 0.0 -2.26923264E8 -23 -2.2692328872E8 0.0 +-51.0 true NULL 8EPG0Xi307qd -51 -328662044 1969-12-31 16:00:08.451 0.0 3.2866201772E8 NULL NULL 33.0 0.0 0.0 NULL 51 0.0 -3.28661993E8 -23 -3.2866201772E8 0.0 +-51.0 true NULL 8iHtdkJ6d -51 1006818344 1969-12-31 16:00:08.451 0.0 -1.00681837028E9 NULL NULL 33.0 0.0 0.0 NULL 51 0.0 1.006818395E9 -23 1.00681837028E9 0.0 +-51.0 true NULL QiOcvR0kt6r7f0R7fiPxQTCU -51 266531954 1969-12-31 16:00:08.451 0.0 -2.6653198028E8 NULL NULL 33.0 0.0 0.0 NULL 51 0.0 2.66532005E8 -23 2.6653198028E8 0.0 +-51.0 true NULL Ybpj38RTTYl7CnJXPNx1g4C -51 -370919370 1969-12-31 16:00:08.451 0.0 3.7091934372E8 NULL NULL 33.0 0.0 0.0 NULL 51 0.0 -3.70919319E8 -23 -3.7091934372E8 0.0 +-48.0 NULL -7196.0 NULL -48 NULL 1969-12-31 16:00:06.337 0.0 NULL -7196.0 -572463.388 33.0 0.0 0.0 -23.0 48 NULL NULL -23 NULL NULL +-6.0 NULL -200.0 NULL -6 NULL 1969-12-31 15:59:56.094 0.0 NULL -200.0 -15910.599999999999 3.0 0.0 0.0 -23.0 6 NULL NULL -5 NULL NULL +5.0 NULL 15601.0 NULL 5 NULL 1969-12-31 16:00:00.959 0.0 NULL 15601.0 1241106.353 3.0 0.0 0.0 -23.0 -5 NULL NULL -3 NULL NULL +8.0 false NULL 10V3pN5r5lI2qWl2lG103 8 -362835731 1969-12-31 16:00:15.892 0.0 3.6283570472E8 NULL NULL 1.0 0.0 0.0 NULL -8 0.0 -3.62835739E8 -7 -3.6283570472E8 0.0 +8.0 false NULL 10c4qt584m5y6uWT 8 -183000142 1969-12-31 16:00:15.892 0.0 1.8300011572E8 NULL NULL 1.0 0.0 0.0 NULL -8 0.0 -1.8300015E8 -7 -1.8300011572E8 0.0 +8.0 false NULL 8GloEukQ0c68JDmnYL53 8 -722873402 1969-12-31 16:00:15.892 0.0 7.2287337572E8 NULL NULL 1.0 0.0 0.0 NULL -8 0.0 -7.2287341E8 -7 -7.2287337572E8 0.0 +8.0 false NULL kA0XH5C5 8 -503903864 1969-12-31 16:00:15.892 0.0 5.0390383772E8 NULL NULL 1.0 0.0 0.0 NULL -8 0.0 -5.03903872E8 -7 -5.0390383772E8 0.0 +8.0 true NULL 100VTM7PEW8GH1uE 8 88129338 1969-12-31 16:00:15.892 0.0 -8.812936428E7 NULL NULL 1.0 0.0 0.0 NULL -8 0.0 8.812933E7 -7 8.812936428E7 0.0 +8.0 true NULL 1062158y 8 -1005155523 1969-12-31 16:00:15.892 0.0 1.00515549672E9 NULL NULL 1.0 0.0 0.0 NULL -8 0.0 -1.005155531E9 -7 -1.00515549672E9 0.0 +8.0 true NULL 1063cEnGjSal 8 -624769630 1969-12-31 16:00:15.892 0.0 6.2476960372E8 NULL NULL 1.0 0.0 0.0 NULL -8 0.0 -6.24769638E8 -7 -6.2476960372E8 0.0 +8.0 true NULL 4kMasVoB7lX1wc5i64bNk 8 683567667 1969-12-31 16:00:15.892 0.0 -6.8356769328E8 NULL NULL 1.0 0.0 0.0 NULL -8 0.0 6.83567659E8 -7 6.8356769328E8 0.0 +8.0 true NULL XH6I7A417 8 436627202 1969-12-31 16:00:15.892 0.0 -4.3662722828E8 NULL NULL 1.0 0.0 0.0 NULL -8 0.0 4.36627194E8 -7 4.3662722828E8 0.0 +11.0 false NULL 10pO8p1LNx4Y 11 271296824 1969-12-31 16:00:02.351 0.0 -2.7129685028E8 NULL NULL 0.0 0.0 0.0 NULL -11 0.0 2.71296813E8 -1 2.7129685028E8 0.0 +11.0 false NULL 1H6wGP 11 -560827082 1969-12-31 16:00:02.351 0.0 5.6082705572E8 NULL NULL 0.0 0.0 0.0 NULL -11 0.0 -5.60827093E8 -1 -5.6082705572E8 0.0 +11.0 false NULL 2a7V63IL7jK3o 11 -325931647 1969-12-31 16:00:02.351 0.0 3.2593162072E8 NULL NULL 0.0 0.0 0.0 NULL -11 0.0 -3.25931658E8 -1 -3.2593162072E8 0.0 +11.0 true NULL 10 11 92365813 1969-12-31 16:00:02.351 0.0 -9.236583928E7 NULL NULL 0.0 0.0 0.0 NULL -11 0.0 9.2365802E7 -1 9.236583928E7 0.0 +21.0 NULL 15601.0 NULL 21 NULL 
1969-12-31 16:00:14.256 0.0 NULL 15601.0 1241106.353 12.0 0.0 0.0 -23.0 -21 NULL NULL -2 NULL NULL +32.0 NULL -200.0 NULL 32 NULL 1969-12-31 16:00:02.445 0.0 NULL -200.0 -15910.599999999999 1.0 0.0 0.0 -23.0 -32 NULL NULL -23 NULL NULL +36.0 NULL -200.0 NULL 36 NULL 1969-12-31 16:00:00.554 0.0 NULL -200.0 -15910.599999999999 33.0 0.0 0.0 -23.0 -36 NULL NULL -23 NULL NULL +58.0 NULL 15601.0 NULL 58 NULL 1969-12-31 15:59:56.527 0.0 NULL 15601.0 1241106.353 33.0 0.0 0.0 -23.0 -58 NULL NULL -23 NULL NULL Index: ql/src/test/results/clientpositive/vector_left_outer_join.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_left_outer_join.q.out (revision 1553449) +++ ql/src/test/results/clientpositive/vector_left_outer_join.q.out (working copy) @@ -152,4 +152,4 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### -192735557 +225951785 Index: ql/src/test/results/clientpositive/vectorized_mapjoin.q.out =================================================================== --- ql/src/test/results/clientpositive/vectorized_mapjoin.q.out (revision 1553449) +++ ql/src/test/results/clientpositive/vectorized_mapjoin.q.out (working copy) @@ -131,4 +131,4 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### -3149925 1073680599 -1073051226 9.381482540406644E8 +3152013 1073680599 -1073279343 9.375396162525452E8 Index: ql/src/test/results/compiler/plan/case_sensitivity.q.xml =================================================================== --- ql/src/test/results/compiler/plan/case_sensitivity.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/case_sensitivity.q.xml (working copy) @@ -79,137 +79,133 @@ #### A masked pattern was here #### - - - - - - - #### A masked pattern was here #### - - - 1 - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - location - #### A masked pattern was here #### - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - 1 - - + + + + + + #### A masked pattern was here #### + + + 1 + + + + + org.apache.hadoop.mapred.TextInputFormat + + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + + + name + default.dest1 + + + columns.types + string:string + + + serialization.ddl + struct dest1 { string key, string value} + + + serialization.format + 1 + + + columns + key,value + + + bucket_count + -1 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + + file.inputformat + org.apache.hadoop.mapred.TextInputFormat + + + location + #### A masked pattern was here #### + + + file.outputformat + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + transient_lastDdlTime + #### A masked pattern was here #### + + + + + + + 1 + + + + + FS_6 + + + + - - FS_6 - - - - - - - - - - - - - - - - key - - - - - - - - string - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + + + + + + string + + + + + string 
+ + + + + + + value + + + + + + + + + string + + + + + + + + + @@ -652,173 +648,114 @@ src_thrift - - - - - - - - - + + + + + + + + + + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + + 150 + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + + 1 + + + + + FS_3 + + - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 150 - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_3 - - - - - - - - - - - - + - - - - - - _col1 - - - - - - - - - lintstring - - - src_thrift - - - - - - - - - myint - - - mystring - - - underscore_int - - - - - - - - - int - - - - - - - - - - - - - - - - - - - - - - - - 0 - - - - - - - - - - - - - - - MYSTRING - - - false - - - - - - - - _col0 - + + + + + + + + + + + _col1 + + + - lint + lintstring src_thrift - + - + + + + + myint + + + mystring + + + underscore_int + + + + + + + + + int + + + + + + + + + + + + @@ -830,7 +767,7 @@ - 1 + 0 @@ -840,206 +777,253 @@ - + - - - - - - - - - - - - - + + MYSTRING - - - - _col0 - - - _col1 - - + + false - - - - SEL_2 - - - - - + + - - - + + _col0 + + - - - _col0 + + + lint - - + + src_thrift - - int + + + + + + - - - _col1 + + + - - + + 1 - - string - + + + + + + - - - - - - - + + + - - - - - - - lint - - - src_thrift - - - - - - - - - - - - - 0 - - - - - - - - - - - - + - - - - - - 0 - - + - - - - - - - boolean + + + + _col0 + + _col1 + - - - - FIL_4 - - - - - + + SEL_2 - - - - - - - - - - lint + + + + + + + + + + + + + _col0 + + + + + + int + + - - src_thrift + + + + _col1 + + + + + + string + + - - - - - array<int> - - - - - lintstring - - - src_thrift - - - - - - array<struct<myint:int,mystring:string,underscore_int:int>> - - - - - - + + + + + + + + + + + + + + + lint + + + src_thrift + + + + + + + + + + + + + 0 + + + + + + + + + + + + + + + + + + + + 0 + + + + + + + + + + + + boolean + + + + + + + + + FIL_4 + + + + + + + + + + + + + + lint + + + src_thrift + + + + + + array<int> + + + + + + + lintstring + + + src_thrift + + + + + + array<struct<myint:int,mystring:string,underscore_int:int>> + + + + + + + + + Index: ql/src/test/results/compiler/plan/cast1.q.xml =================================================================== --- ql/src/test/results/compiler/plan/cast1.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/cast1.q.xml (working copy) @@ -153,754 +153,742 @@ src - - - - - - - - - + + + + + + + + + + + + #### A masked pattern was here #### + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + org.apache.hadoop.mapred.TextInputFormat + + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + + + hive.serialization.extend.nesting.levels + true + + + columns + _col0,_col1,_col2,_col3,_col4,_col5,_col6 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + + serialization.format + 1 + + + columns.types + int:double:double:double:int:boolean:int + + + escape.delim + \ + + + + + + + 1 + + + + + FS_3 + + - - - - - #### A masked pattern was here #### + + + + + + + + + + + _col0 + + + + + + + + int + + + + + int + + + + + + + _col1 + + + + + + + + double + + + + + double + + + + + + + _col2 + + + + + + + + + double + + + + + + + _col3 + + + + + + + + + double + + + + + + + _col4 + + + + + + + + + int + + + + + + + _col5 + + + + + + + + boolean + + + + + boolean + + + + + + + _col6 + + + + + + + + + int + + + + + + + + + + + + + + _col6 + + + + + + + - - 1 - - - #### A masked pattern was here #### - - + true - - - - org.apache.hadoop.mapred.TextInputFormat - - 
- org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - hive.serialization.extend.nesting.levels - true - - - columns - _col0,_col1,_col2,_col3,_col4,_col5,_col6 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - int:double:double:double:int:boolean:int - - - escape.delim - \ - - - - + + + + + + + + org.apache.hadoop.hive.ql.udf.UDFToInteger + + + UDFToInteger + + + + + + + + + + _col5 + + + + + + + - + 1 - - FS_3 + + + + + + org.apache.hadoop.hive.ql.udf.UDFToBoolean - - - - + + UDFToBoolean + + + + + + + + + + _col4 + + + + + + + + + 3 + - - - + + + - - - _col0 - - - - - - - - int - - - - - int - - - - - - - _col1 - - - - - - - - double - - - - - double - - - - - - - _col2 - - - - - + + - - double + + 2.0 - - - - _col3 - - - - - - - - - double - - + + + + + + org.apache.hadoop.hive.ql.udf.UDFToInteger - - - - _col4 - - - - - - - - - int - - + + UDFToInteger - - - - _col5 - - - - - - - - boolean - - - - - boolean - - - - - - - _col6 - - - - - - - - - int - - - + + + + + + + + + - - - - _col6 - - - - - - - - - - true - - + + _col3 + + + + + + + + + 3.0 + - - - - org.apache.hadoop.hive.ql.udf.UDFToInteger + + + + - - UDFToInteger + + 2.0 - - - - - _col5 - - - - - - - - - - 1 - - + + + + + + + + + + _col2 + + + + + + + + + 3 + - - - - org.apache.hadoop.hive.ql.udf.UDFToBoolean + + + + - - UDFToBoolean + + 2.0 - - - - - _col4 - - - - - - - - - - 3 - - + + + + + + + + + + _col1 + + + + + + + - - - - - - - - - - - 2.0 - - - - - - - - - org.apache.hadoop.hive.ql.udf.UDFToInteger - - - UDFToInteger - - - - - - - + + 3.0 - - - - - - - - - - _col3 - - - - - - - - - - 3.0 - - + + + + - - - - - - - 2.0 - - + + 2 - - - - - - - - _col2 - - - - - - - - - - 3 - - + + + + + + + + + + _col0 + + + + + + + - - - - - - - 2.0 - - + + 3 - - - - - - - - - - _col1 - - - - - - - - - - 3.0 - - + + + + - - - - - - - 2 - - + + 2 - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + _col0 - - - - - - - - - - 3 - - - - - - - - - - 2 - - - - + + + _col1 + + + _col2 + + + _col3 + + + _col4 + + + _col5 + + + _col6 + + + + + + + SEL_2 + + + + + + + + + + + + + + _c0 - - + + _col0 - + + + int + - - - - - - - - + + + + _c1 - - + + _col1 - - + + - - + + double - - + + + + + + _c2 - - + + _col2 - - + + + + double + - - - - _col0 + + + + _c3 - - _col1 - - - _col2 - - + _col3 - - _col4 + + - - _col5 + + double - - _col6 - - - - - SEL_2 - - - - - - - - - - - - - - - _c0 - - - _col0 - - - - - - int - - + + + _c4 - - - - _c1 - - - _col1 - - - - - - double - - + + _col4 - - - - _c2 - - - _col2 - - - - - - double - - + + - - - - _c3 - - - _col3 - - - - - - double - - + + int - - - - _c4 - - - _col4 - - - - - - int - - - - - - - _c5 - - - _col5 - - - - - - boolean - - - - - - - _c6 - - - _col6 - - - - - - int - - - - - - - - - - - - - - - - - - key + + + _c5 - - src + + _col5 - - - - string - - + + + + boolean + - - + + + _c6 + + + _col6 + + - - 86 + + int - - - - - - - - FIL_4 + + + + + + + + + + + key + + + src + + + + + string + + + + + + + + + + + + 86 + + + + + + + + + + + + + + + + + FIL_4 + + + + - - - - - - - - - - - - - - - key - - - src - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + src + + + + + + string + + + + + + + + + Index: ql/src/test/results/compiler/plan/groupby1.q.xml =================================================================== --- ql/src/test/results/compiler/plan/groupby1.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/groupby1.q.xml 
(working copy) @@ -300,297 +300,289 @@ src - - - - - - - - + + + + + + + + + + + + KEY._col0 + + + _col0 + + + + + + + + string + + + + + + + VALUE._col0 + + + _col1 + + + + + + + + double + + + + + + + + + + + + + + + + + + + + + + + org.apache.hadoop.mapred.SequenceFileInputFormat + + + org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + + + + + columns + _col0 + + + serialization.lib + org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe + + + serialization.sort.order + + + + + columns.types + string + + + + + + + 1 + + + -1 + + + + + _col0 + + + + + + + _col0 + + + + + + + + -1 + + + + + + + + + + + + org.apache.hadoop.mapred.SequenceFileInputFormat + + + org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + + + + + columns + _col0 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + + columns.types + double + + + escape.delim + \ + + + + + + + + + RS_3 + + + + + + + + + + + + + + KEY._col0 + + + + + + string + + + + + + + VALUE._col0 + + + + + + double + + + + + + + + + + + + + + _col0 + + + key + + + src + + + + + + + + + + + - - - - - KEY._col0 - - - _col0 - - - - - - - - string - - - - - - - VALUE._col0 - - - _col1 - - - - - - - - double - - - - - - + + + org.apache.hadoop.hive.ql.udf.generic.GenericUDAFSum$GenericUDAFSumDouble - - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - _col0 - - - serialization.lib - org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe - - - serialization.sort.order - + - - - columns.types - string - - - - - - - 1 - - - -1 - - - - - _col0 - - - - - - - _col0 - - - - - - - - -1 - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - _col0 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - double - - - escape.delim - \ - - - - - + + sum + + + + PARTIAL1 - - RS_3 - - + - - - - - - - - - - - - KEY._col0 - - - - - - string - - - - - - - VALUE._col0 - - - - - - double - - - - - - - - - - - - - - - _col0 - - - key - - - src - - - - - - - - - - - - - - - - org.apache.hadoop.hive.ql.udf.generic.GenericUDAFSum$GenericUDAFSumDouble - - - sum - - - - PARTIAL1 - - - - - - - - - - - - value - - - src - - - - - + + + + + + + value - - - - - - int - - - - - 5 - - + + src + + + - - - - org.apache.hadoop.hive.ql.udf.UDFSubstr + + + + + + int + + - - substr + + 5 - - + + + + + + org.apache.hadoop.hive.ql.udf.UDFSubstr + + substr + + + + @@ -597,266 +589,262 @@ - - 0.5 - - - 1 - - - - - - - - - - - - - 0.9 - - - - HASH - - - - - - _col0 - - - _col1 - - - - - GBY_2 + + 0.5 - + + 1 + + - + - - - - - - - - _col0 - - - - - - - - - string - - - - - - - _col1 - - - - - - - - - double - - - - - - + + - - - - - - - - BLOCK__OFFSET__INSIDE__FILE - - - BLOCK__OFFSET__INSIDE__FILE + + 0.9 - - true - - - src - - - - - bigint - + + + HASH - - - - INPUT__FILE__NAME - - - INPUT__FILE__NAME - - - true - - - src - - - - - - - - value - - - value - - - src - - - - - - - - key - - - key - - - src - - - - - - - - - - - - - - - - key + + + + _col0 - - src + + _col1 - - - - - - - value - - - src - - - - - - - - - - key - - - value - - + + GBY_2 - - true + + + + - - - - SEL_1 - - - - - - - - - - - - - - - - key + + + + + + + + _col0 + + + + + + + + + string + + - - src + + + + _col1 + + + + + + + + + double + + - - - - - string - - - - - value - - - src - - - - - 
- string - - - - - - + + + + + BLOCK__OFFSET__INSIDE__FILE + + + BLOCK__OFFSET__INSIDE__FILE + + + true + + + src + + + + + bigint + + + + + + + INPUT__FILE__NAME + + + INPUT__FILE__NAME + + + true + + + src + + + + + + + + value + + + value + + + src + + + + + + + + key + + + key + + + src + + + + + + + + + + + + + + + + key + + + src + + + + + + + + + + value + + + src + + + + + + + + + + + + key + + + value + + + + + true + + + + + SEL_1 + + + + + + + + + + + + + + key + + + src + + + + + + string + + + + + + + value + + + src + + + + + + string + + + + + + + + + @@ -1139,207 +1127,199 @@ - - - - - - - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 150 - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - + + + + + + + + + 1 - - FS_6 + + #### A masked pattern was here #### - - - - - - + + true - - - - - - - - key - - - - - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - + + - - - - - - - - _col1 - - - _col1 + + 150 - - + + 1 - - + + #### A masked pattern was here #### - - - - _col0 - - - _col0 + + true - - + + - - + + 1 - - - - - - - - - - - - - + + FS_6 - - - - _col0 - - - _col1 - - + + + + - - - - SEL_5 - - - - - - - - - - - - - - - - _col0 + + + + + + + + key + + + + + + + + + string + + - - src + + + + value + + + + + + + + + string + + - - - - - string - - - - - _col1 - - - - - - double - - - - - - + + + + + _col1 + + + _col1 + + + + + + + + + + + _col0 + + + _col0 + + + + + + + + + + + + + + + + + + + + + + + + + + + _col0 + + + _col1 + + + + + + + SEL_5 + + + + + + + + + + + + + + _col0 + + + src + + + + + + string + + + + + + + _col1 + + + + + + double + + + + + + + + + @@ -1428,11 +1408,9 @@ GBY_4 - - - - - + + + Index: ql/src/test/results/compiler/plan/groupby2.q.xml =================================================================== --- ql/src/test/results/compiler/plan/groupby2.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/groupby2.q.xml (working copy) @@ -153,529 +153,521 @@ src - - - - - - - - - + + + + + + + + + + + + KEY._col0 + + + _col0 + + + + + + + + string + + + + + + + VALUE._col1 + + + _col3 + + + + + + + + double + + + + + + + KEY._col1:0._col0 + + + _col1 + + + + + + + + + + + VALUE._col0 + + + _col2 + + + + + + + + bigint + + + + + + + + + + + + + + + 1 + + + + + + + + + + + + + + + + + + + org.apache.hadoop.mapred.SequenceFileInputFormat + + + org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + + + + + columns + _col0,_col1 + + + serialization.lib + org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe + + + serialization.sort.order + ++ + + + columns.types + string,uniontype<struct<_col0:string>> + + + + + + + 1 + + + -1 + + + + + _col0 + + + _col1 + + + + + + + _col0 + + + _col1 + + + + + + + + + + + + -1 + + + + + + + + + + + + + + + org.apache.hadoop.mapred.SequenceFileInputFormat + + + org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + + + + + columns + _col0,_col1 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + + columns.types + bigint,double + + + escape.delim + \ + + + + + + + + + RS_3 + + - - - - - KEY._col0 - - - _col0 - - - - - - - - string - - - - + + + + + + + + + + + KEY._col0 + + + + + + string + + + + + + + KEY._col1:0._col0 + + + + + + string + + + + + + + VALUE._col0 + + + + + + bigint + + + + + + + VALUE._col1 + + + + + + double + + + + + + + + + + + + + + _col1 + + + + + + + value - - VALUE._col1 - - - _col3 - - - - - - - - double - - - - + + src - - KEY._col1:0._col0 - - - _col1 - 
Index: ql/src/test/results/compiler/plan/groupby3.q.xml
===================================================================
--- ql/src/test/results/compiler/plan/groupby3.q.xml (revision 1553449)
+++ ql/src/test/results/compiler/plan/groupby3.q.xml (working copy)
Index: ql/src/test/results/compiler/plan/groupby4.q.xml
===================================================================
--- ql/src/test/results/compiler/plan/groupby4.q.xml (revision 1553449)
+++ ql/src/test/results/compiler/plan/groupby4.q.xml (working copy)
Index: ql/src/test/results/compiler/plan/groupby5.q.xml
===================================================================
--- ql/src/test/results/compiler/plan/groupby5.q.xml (revision 1553449)
+++ ql/src/test/results/compiler/plan/groupby5.q.xml (working copy)
Index: ql/src/test/results/compiler/plan/groupby6.q.xml
===================================================================
--- ql/src/test/results/compiler/plan/groupby6.q.xml (revision 1553449)
+++ ql/src/test/results/compiler/plan/groupby6.q.xml (working copy)
Index: ql/src/test/results/compiler/plan/input1.q.xml
===================================================================
--- ql/src/test/results/compiler/plan/input1.q.xml (revision 1553449)
+++ ql/src/test/results/compiler/plan/input1.q.xml (working copy)
Index: ql/src/test/results/compiler/plan/input2.q.xml
===================================================================
--- ql/src/test/results/compiler/plan/input2.q.xml (revision 1553449)
+++ ql/src/test/results/compiler/plan/input2.q.xml (working copy)
Index: ql/src/test/results/compiler/plan/input20.q.xml
===================================================================
--- ql/src/test/results/compiler/plan/input20.q.xml (revision 1553449)
+++ ql/src/test/results/compiler/plan/input20.q.xml (working copy)
Index: ql/src/test/results/compiler/plan/input3.q.xml
===================================================================
--- ql/src/test/results/compiler/plan/input3.q.xml (revision 1553449)
+++ ql/src/test/results/compiler/plan/input3.q.xml (working copy)
Index: ql/src/test/results/compiler/plan/input4.q.xml
===================================================================
--- ql/src/test/results/compiler/plan/input4.q.xml (revision 1553449)
+++ ql/src/test/results/compiler/plan/input4.q.xml (working copy)
- - - - - 100 - - + + - - + + + + -1 + + + + + - - - - boolean + + + + + + + + + org.apache.hadoop.mapred.SequenceFileInputFormat + + + org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + + + + + columns + _col0,_col1 + + serialization.lib + org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + + columns.types + string,string + + + escape.delim + \ + @@ -570,264 +473,345 @@ - FIL_8 + RS_3 - - - - - + + + - + + + + + tkey + + + _col0 + + + + + + string + + + + + + + tvalue + + + _col1 + + + + + + string + + + + - + + + + + + + + + + + + + + + + int + + + + + 100 + + + + + + + + + + + + boolean + + + + + + + + + FIL_8 + + + + + + + + + + + + + + + + + + + + org.apache.hadoop.hive.ql.exec.TextRecordReader - - - - org.apache.hadoop.hive.ql.exec.TextRecordReader + + org.apache.hadoop.hive.ql.exec.TextRecordWriter + + + org.apache.hadoop.hive.ql.exec.TextRecordReader + + + /bin/cat + + + + + org.apache.hadoop.mapred.TextInputFormat - - org.apache.hadoop.hive.ql.exec.TextRecordWriter + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - org.apache.hadoop.hive.ql.exec.TextRecordReader - - - /bin/cat - - - - - org.apache.hadoop.mapred.TextInputFormat + + + + field.delim + 9 - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + columns + KEY - - - - field.delim - 9 - - - columns - KEY - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 9 - - + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + serialization.format + 9 + - - - - org.apache.hadoop.mapred.TextInputFormat + + + + + + org.apache.hadoop.mapred.TextInputFormat + + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + + + field.delim + 9 - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + columns + _col0,_col1 - - - - field.delim - 9 - - - columns - _col0,_col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.DelimitedJSONSerDe - - - serialization.format - 9 - - - columns.types - string,string - - + + serialization.lib + org.apache.hadoop.hive.serde2.DelimitedJSONSerDe - - - - - - org.apache.hadoop.mapred.TextInputFormat + + serialization.format + 9 - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + columns.types + string,string - - - - field.delim - 9 - - - columns - _col0,_col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 9 - - - columns.types - string,string - - - - - SCR_2 - - - - - + + + + org.apache.hadoop.mapred.TextInputFormat - - - - - - + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + + field.delim + 9 + + + columns + _col0,_col1 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + + serialization.format + 9 + + + columns.types + string,string + + + - - - - - - _col1 - - - value - - - src - - - - - + + SCR_2 - - _col0 - - - key - - - src - - - - - + + + + - - - - - - - - + + + + - - - - - - - _col0 - - - _col1 - - - - - SEL_1 + + + + + _col1 + + + value + + + src + + + + + + + + _col0 + + + key + + + src + + + + + + + + + + + + + + + + + + + + + + + + _col0 + + + _col1 + + + + + + + SEL_1 + + + + - - - - - - - - - - - - - - - _col0 - - - - - - string - - - - - - - _col1 - - - - - - string - - - - - - - - - - + + + + + + + + + _col0 + + + + + + string + + + + + + + _col1 + + + + + + string + + + + + + + + + @@ -1140,210 +1124,202 @@ - - - - - - - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 150 - - - 1 - - - #### A masked 
pattern was here #### - - - true - - - - - - 1 - - + + + + + + + + + 1 - - FS_7 + + #### A masked pattern was here #### - - - - - - + + true - - - - - - - - key - - - - - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - + + - - - - - - - - _col1 - - - _col1 + + 150 - - tmap + + 1 - - + + #### A masked pattern was here #### - - - - _col0 - - - _col0 + + true - - tmap + + - - + + 1 - - - - - - - - - - - - - + + FS_7 - - - - _col0 - - - _col1 - - + + + + - - - - SEL_6 - - - - - - - - - - - - - - - - _col0 + + + + + + + + key + + + + + + + + + string + + - - tmap + + + + value + + + + + + + + + string + + - - - - - string - - - - - _col1 - - - tmap - - - - - - string - - - - - - + + + + + _col1 + + + _col1 + + + tmap + + + + + + + + _col0 + + + _col0 + + + tmap + + + + + + + + + + + + + + + + + + + + + + + + _col0 + + + _col1 + + + + + + + SEL_6 + + + + + + + + + + + + + + _col0 + + + tmap + + + + + + string + + + + + + + _col1 + + + tmap + + + + + + string + + + + + + + + + @@ -1366,11 +1342,9 @@ EX_4 - - - - - + + + Index: ql/src/test/results/compiler/plan/input5.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input5.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/input5.q.xml (working copy) @@ -308,412 +308,334 @@ tmap:src_thrift - - - - - - - - - + + + + + + + + + + + + _col1 + + + _col1 + + + + + string + + + + + + + _col0 + + + _col0 + + + + + + + + + + + + + + + + + + + _col0 + + + + + + + + + + + + org.apache.hadoop.mapred.SequenceFileInputFormat + + + org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + + + + + columns + reducesinkkey0 + + + serialization.lib + org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe + + + serialization.sort.order + + + + + columns.types + string + + + + + + + 1 + + + -1 + + + + + reducesinkkey0 + + + + + + + _col0 + + + _col1 + + + + + + + + + _col0 + + + + + + + + + + -1 + + + + + + + + + + + + + + + org.apache.hadoop.mapred.SequenceFileInputFormat + + + org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + + + + + columns + _col0,_col1 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + + columns.types + string,string + + + escape.delim + \ + + + + + + + + + RS_3 + + - - - - - _col1 - - - _col1 - - - - - string - - - - - - - _col0 - - - _col0 - - - - - - - + + + + + + + + + + + tkey + + + _col0 + + + + + + string + + + + + + + tvalue + + + _col1 + + + + + + string + + + + + + + + + + + + + + org.apache.hadoop.hive.ql.exec.TextRecordReader + + + org.apache.hadoop.hive.ql.exec.TextRecordWriter + + + org.apache.hadoop.hive.ql.exec.TextRecordReader + + + /bin/cat + + + + + org.apache.hadoop.mapred.TextInputFormat + + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + + + field.delim + 9 - - - - - - - - - - - _col0 - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - reducesinkkey0 - - - serialization.lib - org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe - - - serialization.sort.order - + - - - columns.types - string - - - - - - - 1 - - - -1 - - - - - reducesinkkey0 - - - - - - - _col0 - - - _col1 - - - - - - - - - _col0 - - - - - - - - - - -1 - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - _col0,_col1 - - - serialization.lib - 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - string,string - - - escape.delim - \ - - - - - - + + columns + KEY - - RS_3 + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - - - - + + serialization.format + 9 - - - - - - - - tkey - - - _col0 - - - - - - string - - - - - - - tvalue - - - _col1 - - - - - - string - - - - - - - - - - - org.apache.hadoop.hive.ql.exec.TextRecordReader + + + + org.apache.hadoop.mapred.TextInputFormat - - org.apache.hadoop.hive.ql.exec.TextRecordWriter + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - org.apache.hadoop.hive.ql.exec.TextRecordReader - - - /bin/cat - - - - - org.apache.hadoop.mapred.TextInputFormat + + + + field.delim + 9 - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + columns + _col0,_col1 - - - - field.delim - 9 - - - columns - KEY - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 9 - - + + serialization.lib + org.apache.hadoop.hive.serde2.DelimitedJSONSerDe - - - - - - org.apache.hadoop.mapred.TextInputFormat + + serialization.format + 9 - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + columns.types + array<int>,array<struct<myint:int,mystring:string,underscore_int:int>> - - - - field.delim - 9 - - - columns - _col0,_col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.DelimitedJSONSerDe - - - serialization.format - 9 - - - columns.types - array<int>,array<struct<myint:int,mystring:string,underscore_int:int>> - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - field.delim - 9 - - - columns - _col0,_col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 9 - - - columns.types - string,string - - - - - - - SCR_2 - - - - - + + + + org.apache.hadoop.mapred.TextInputFormat - - - - - - + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - - - _col1 - - - lintstring - - - src_thrift - - - - - - - - - myint - - - mystring - - - underscore_int - - + + + + field.delim + 9 - - - - - - int - - - - - - - - - - + + columns + _col0,_col1 + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + + serialization.format + 9 + + + columns.types + string,string + @@ -720,97 +642,163 @@ - - _col0 - - - lint - - - src_thrift - - - - - - - - - + + SCR_2 - - - - - - - - - - - - - + + + + - - - - _col0 + + + + - - _col1 - - - SEL_1 + + + + + _col1 + + + lintstring + + + src_thrift + + + + + + + + + myint + + + mystring + + + underscore_int + + + + + + + + + int + + + + + + + + + + + + + + + + + + + _col0 + + + lint + + + src_thrift + + + + + + + + + + + + + + + + + + + + + + + + + + + + _col0 + + + _col1 + + + + + + + SEL_1 + + + + - - - - - - - - - - - - - - - _col0 - - - - - - array<int> - - - - - - - _col1 - - - - - - array<struct<myint:int,mystring:string,underscore_int:int>> - - - - - - - - - - + + + + + + + + + _col0 + + + + + + array<int> + + + + + + + _col1 + + + + + + array<struct<myint:int,mystring:string,underscore_int:int>> + + + + + + + + + @@ -1206,210 +1194,202 @@ - - - - - - - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 150 - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - + + + + + + + + + 1 - - FS_6 + + #### A masked pattern was here #### - - - - - - + + true - - - - - - - - key - - - - - - - - - string - - - - - - - value - - - - - - - - - 
string - - - - - - + + - - - - - - - - _col1 - - - _col1 + + 150 - - tmap + + 1 - - + + #### A masked pattern was here #### - - - - _col0 - - - _col0 + + true - - tmap + + - - + + 1 - - - - - - - - - - - - - + + FS_6 - - - - _col0 - - - _col1 - - + + + + - - - - SEL_5 - - - - - - - - - - - - - - - - _col0 + + + + + + + + key + + + + + + + + + string + + - - tmap + + + + value + + + + + + + + + string + + - - - - - string - - - - - _col1 - - - tmap - - - - - - string - - - - - - + + + + + _col1 + + + _col1 + + + tmap + + + + + + + + _col0 + + + _col0 + + + tmap + + + + + + + + + + + + + + + + + + + + + + + + _col0 + + + _col1 + + + + + + + SEL_5 + + + + + + + + + + + + + + _col0 + + + tmap + + + + + + string + + + + + + + _col1 + + + tmap + + + + + + string + + + + + + + + + @@ -1432,11 +1412,9 @@ EX_4 - - - - - + + + Index: ql/src/test/results/compiler/plan/input6.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input6.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/input6.q.xml (working copy) @@ -79,137 +79,133 @@ #### A masked pattern was here #### - - - - - - - #### A masked pattern was here #### - - - 1 - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - location - #### A masked pattern was here #### - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - 1 - - + + + + + + #### A masked pattern was here #### + + + 1 + + + + + org.apache.hadoop.mapred.TextInputFormat + + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + + + name + default.dest1 + + + columns.types + string:string + + + serialization.ddl + struct dest1 { string key, string value} + + + serialization.format + 1 + + + columns + key,value + + + bucket_count + -1 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + + file.inputformat + org.apache.hadoop.mapred.TextInputFormat + + + location + #### A masked pattern was here #### + + + file.outputformat + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + transient_lastDdlTime + #### A masked pattern was here #### + + + + + + + 1 + + + + + FS_6 + + + + - - FS_6 - - - - - - - - - - - - - - - - key - - - - - - - - string - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + + + + + + string + + + + + string + + + + + + + value + + + + + + + + + string + + + + + + + + + @@ -644,206 +640,161 @@ src1 - - - - - - - - - + + + + + + + + + + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + + 150 + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + + 1 + + + + + FS_3 + + - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 150 - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_3 - - - - - - - - - - - - + - - - - - - _col1 - - - value - - - src1 - - - - - + + + + + + + + + + + _col1 + + + value - - _col0 - - - key - - - src1 - - - - - + + src1 + + + - - - - - - - - - - - + + 
_col0 + + + key - - - - _col0 - - - _col1 - - + + src1 + + + - - SEL_2 - - + + + + + - + + + + - - - - - - - - _col0 - - - src1 - - - - - - string - - - - - - - _col1 - - - src1 - - - - - - string - - - - + + + + _col0 + + _col1 + - - - - - - - + + SEL_2 + + + + + + + + + - - - key + + + _col0 src1 - + + + string + - - - - - - - - - boolean + + + + _col1 + + + src1 + + + + + + string + + @@ -851,59 +802,92 @@ - - FIL_4 + + + + + + + + + + + key + + + src1 + + + + + + + + + + + + + + + boolean + + + + + + + + + FIL_4 + + + + - - - - - - - - - - - - - - - key - - - src1 - - - - - - string - - - - - - - value - - - src1 - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + src1 + + + + + + string + + + + + + + value + + + src1 + + + + + + string + + + + + + + + + Index: ql/src/test/results/compiler/plan/input7.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input7.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/input7.q.xml (working copy) @@ -79,137 +79,133 @@ #### A masked pattern was here #### - - - - - - - #### A masked pattern was here #### - - - 1 - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - location - #### A masked pattern was here #### - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - 1 - - + + + + + + #### A masked pattern was here #### + + + 1 + + + + + org.apache.hadoop.mapred.TextInputFormat + + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + + + name + default.dest1 + + + columns.types + string:string + + + serialization.ddl + struct dest1 { string key, string value} + + + serialization.format + 1 + + + columns + key,value + + + bucket_count + -1 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + + file.inputformat + org.apache.hadoop.mapred.TextInputFormat + + + location + #### A masked pattern was here #### + + + file.outputformat + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + transient_lastDdlTime + #### A masked pattern was here #### + + + + + + + 1 + + + + + FS_4 + + + + - - FS_4 - - - - - - - - - - - - - - - - key - - - - - - - - string - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + + + + + + string + + + + + string + + + + + + + value + + + + + + + + + string + + + + + + + + + @@ -644,168 +640,160 @@ src1 - - - - - - - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 150 - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - + + + + + + + + + 1 - - FS_2 + + #### A masked pattern was here #### - - - - - - + + true - - + + - - - - - - - - _col1 - - - key + + 150 - - src1 + + 1 - - + + #### A masked pattern was here #### - - - - _col0 - - - + + true - - - - - - - - - - + + - - + + 1 - - - - _col0 - - - _col1 - - + + FS_2 - - - - SEL_1 - - - - - + + + + - - - - - - - - - - _col0 - - - - - void - - - - - void - - - - - - - _col1 - - - src1 - - - - - - string - - - - + + 
- - - + + + + + _col1 + + + key + + + src1 + + + + + + + + _col0 + + + + + + + + + + + + + + + + + + + + + + + + _col0 + + + _col1 + + + + + + + SEL_1 + + + + + + + + + + + + + + _col0 + + + + + void + + + + + void + + + + + + + _col1 + + + src1 + + + + + + string + + + + + + + + + Index: ql/src/test/results/compiler/plan/input8.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input8.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/input8.q.xml (working copy) @@ -153,335 +153,327 @@ src1 - - - - - - - - - - - #### A masked pattern was here #### + + + + + + + + + #### A masked pattern was here #### + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + org.apache.hadoop.mapred.TextInputFormat - - 1 + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - #### A masked pattern was here #### - - - true - - - - - org.apache.hadoop.mapred.TextInputFormat + + + + hive.serialization.extend.nesting.levels + true - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + columns + _col0,_col1,_col2 - - - - hive.serialization.extend.nesting.levels - true - - - columns - _col0,_col1,_col2 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - double:double:double - - - escape.delim - \ - - + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + serialization.format + 1 + + + columns.types + double:double:double + + + escape.delim + \ + - - 1 - - - FS_2 + + 1 - + + + + FS_2 + + + + + + + + + - - - - - - - - - - - - _col0 - - - - - - - - double - - - - - double - - + + + _col0 - - - - _col1 - - - - - - - - - double - - + + - - - - _col2 - - - - - - - + + double + + double + - - - - - - - - - - _col2 - - - - - - - - - - - - - - - - - - - - _col1 - - - - - - - key + + + _col1 - src1 + - - - - string - - + + + + double + - - - - - - - - - - - - - - _col0 - - - - - - - - - int - - + + + _col2 - - 4 + + + + + + + double + - - - - - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - _col2 - - - - + + + + + _col2 + + + + + + + + + + + + + + + + + + + + + _col1 + + + + + + + key + + + src1 + + + + + string + + + + + + + + + + + + + + + + + + + + _col0 + + + + + + + + + int + + + + + 4 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + _col0 + + + _col1 + + + _col2 + + + + + + + SEL_1 + + + + - - SEL_1 - - - - - - - - - - - - - - - - _c0 - - - _col0 - - - - - - double - - - - - - - _c1 - - - _col1 - - - - - - double - - - - - - - _c2 - - - _col2 - - - - - - double - - - - - - - - - - + + + + + + + + + _c0 + + + _col0 + + + + + + double + + + + + + + _c1 + + + _col1 + + + + + + double + + + + + + + _c2 + + + _col2 + + + + + + double + + + + + + + + + Index: ql/src/test/results/compiler/plan/input9.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input9.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/input9.q.xml (working copy) @@ -79,137 +79,133 @@ #### A masked pattern was here #### - - - - - - - #### A masked pattern was here #### - - - 1 - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - bucket_count - -1 - - - serialization.lib 
- org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - location - #### A masked pattern was here #### - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - 1 - - + + + + + + #### A masked pattern was here #### + + + 1 + + + + + org.apache.hadoop.mapred.TextInputFormat + + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + + + name + default.dest1 + + + columns.types + string:string + + + serialization.ddl + struct dest1 { string key, string value} + + + serialization.format + 1 + + + columns + key,value + + + bucket_count + -1 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + + file.inputformat + org.apache.hadoop.mapred.TextInputFormat + + + location + #### A masked pattern was here #### + + + file.outputformat + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + transient_lastDdlTime + #### A masked pattern was here #### + + + + + + + 1 + + + + + FS_6 + + + + - - FS_6 - - - - - - - - - - - - - - - - key - - - - - - - - string - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + + + + + + string + + + + + string + + + + + + + value + + + + + + + + + string + + + + + + + + + @@ -644,238 +640,226 @@ src1 - - - - - - - - - + + + + + + + + + + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + + 150 + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + + 1 + + + + + FS_3 + + - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 150 - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_3 - - - - - - - - - - - - + + + + + + + + + + + + _col1 + + + key + + + src1 + + + + - - - - _col1 - - - key - - - src1 - - - - - + + _col0 + + + - - _col0 - - - - - - - - - - - - - - - - - + + + + + + + + - - - - _col0 - - - _col1 - - + + - - SEL_2 - - + - + _col0 + + _col1 + - - - - - - - - _col0 - - - - - void - - - + + + + SEL_2 + + + + + + + + + + + + + + _col0 + + + void - - - - _col1 - - - src1 - - - - - - string - - + + void - - - - - - - - - - - - - + + + _col1 + + + src1 + + + + + + string + + - - - - - - - - - - boolean - - - - - FIL_4 + + + + + + + + + + + + + + + + + + + + + + boolean + + + + + + + + + FIL_4 + + + + - - - - - - - - - - - - - - - key - - - src1 - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + src1 + + + + + + string + + + + + + + + + Index: ql/src/test/results/compiler/plan/input_part1.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input_part1.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/input_part1.q.xml (working copy) @@ -174,498 +174,486 @@ srcpart - - - - - - - - + + + + + + + + + + + + #### A masked pattern was here #### + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + org.apache.hadoop.mapred.TextInputFormat + + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + + + hive.serialization.extend.nesting.levels + true + + + columns + _col0,_col1,_col2,_col3 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + + serialization.format + 1 + + + columns.types + string:string:string:string + + + escape.delim + \ + + + + + + + 1 + + + + + FS_3 + + + + + + + + + + + + + + _col0 + + + + + + + + string + + + + + string + + + + + + + _col1 + + + 
+ + + + + + string + + + + + + + _col2 + + + + + + + + + string + + + + + + + _col3 + + + + + + + + + string + + + + + + + + + + + + + + _col3 + + + ds + + + true + + + srcpart + + + + + + + + _col2 + + + hr + + + true + + + srcpart + + + + + + + + _col1 + + + value + + + srcpart + + + + + + + + _col0 + + + key + + + srcpart + + + + + + + + + + + - - - - - #### A masked pattern was here #### - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - hive.serialization.extend.nesting.levels - true - - - columns - _col0,_col1,_col2,_col3 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - string:string:string:string - - - escape.delim - \ - - - - - - - 1 - - - - - FS_3 - - - - - - - - - - - - - - - - _col0 - - - - - - - - string - - - - - string - - - - - - - _col1 - - - - - - - - - string - - - - - - - _col2 - - - - - - - - - string - - - - - - - _col3 - - - - - - - - - string - - - - - - - - + + + + + + + + + + - - - + + + + _col0 + + + _col1 + + + _col2 + + _col3 - - - ds + + + + + + + SEL_2 + + + + + + + + + + + + + + key - - true + + _col0 srcpart - + + + string + - - _col2 - - - hr + + + + value - - true + + _col1 srcpart - + + + string + - - _col1 - - - value + + + + hr - - srcpart + + _col2 - - - - - - - _col0 - - - key - srcpart - + - - - - - - - - - - + + string - - - - - - - - - - - - - _col0 + + + + ds - - _col1 - - - _col2 - - + _col3 - - - - - - SEL_2 - - - - - - - - - - - - - - - - key - - - _col0 - - - srcpart - - - - - - string - - - - - - - value - - - _col1 - - - srcpart - - - - - - string - - - - - - - hr - - - _col2 - - - srcpart - - - - - - string - - - - - - - ds - - - _col3 - - - srcpart - - - - - - string - - - - - - - - - - - - - - - - - - - - - key - srcpart - + - - - - - - - - int - - + + string - - 100 - - - - - - - - boolean - - - - - FIL_4 + + + + + + + + + + + key + + + srcpart + + + + + + + + + + + + int + + + + + 100 + + + + + + + + + + + + boolean + + + + + + + + + FIL_4 + + + + - - - - - - - - - - - - - - - key - - - srcpart - - - - - - string - - - - - - - value - - - srcpart - - - - - - string - - - - - - - ds - - - srcpart - - - - - - string - - - - - - - hr - - - srcpart - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + srcpart + + + + + + string + + + + + + + value + + + srcpart + + + + + + string + + + + + + + ds + + + srcpart + + + + + + string + + + + + + + hr + + + srcpart + + + + + + string + + + + + + + + + Index: ql/src/test/results/compiler/plan/input_testsequencefile.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input_testsequencefile.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/input_testsequencefile.q.xml (working copy) @@ -79,137 +79,133 @@ #### A masked pattern was here #### - - - - - - - #### A masked pattern was here #### - - - 1 - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - name - default.dest4_sequencefile - - - columns.types - string:string - - - serialization.ddl - struct dest4_sequencefile { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.SequenceFileInputFormat 
- - - location - #### A masked pattern was here #### - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - 1 - - + + + + + + #### A masked pattern was here #### + + + 1 + + + + + org.apache.hadoop.mapred.SequenceFileInputFormat + + + org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + + + + + name + default.dest4_sequencefile + + + columns.types + string:string + + + serialization.ddl + struct dest4_sequencefile { string key, string value} + + + serialization.format + 1 + + + columns + key,value + + + bucket_count + -1 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + + file.inputformat + org.apache.hadoop.mapred.SequenceFileInputFormat + + + location + #### A masked pattern was here #### + + + file.outputformat + org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + + + transient_lastDdlTime + #### A masked pattern was here #### + + + + + + + 1 + + + + + FS_4 + + + + - - FS_4 - - - - - - - - - - - - - - - - key - - - - - - - - string - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + + + + + + string + + + + + string + + + + + + + value + + + + + + + + + string + + + + + + + + + @@ -644,173 +640,165 @@ src - - - - - - - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 150 - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - + + + + + + + + + 1 - - FS_2 + + #### A masked pattern was here #### - - - - - - + + true - - + + - - - - - - - - _col1 - - - value + + 150 - - src + + 1 - - + + #### A masked pattern was here #### - - - - _col0 - - - key + + true - - src + + - - + + 1 - - - - - - - - - - - - - + + FS_2 - - - - _col0 - - - _col1 - - + + + + - - - - SEL_1 - - - - - + + - - - - - - - - _col0 - - - src - - - - - - string - - - - - - - _col1 - - - src - - - - - - string - - - - - - + + + + + _col1 + + + value + + + src + + + + + + + + _col0 + + + key + + + src + + + + + + + + + + + + + + + + + + + + + + + + _col0 + + + _col1 + + + + + + + SEL_1 + + + + - - - + + + + + + + + + _col0 + + + src + + + + + + string + + + + + + + _col1 + + + src + + + + + + string + + + + + + + + + Index: ql/src/test/results/compiler/plan/input_testxpath.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input_testxpath.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/input_testxpath.q.xml (working copy) @@ -161,420 +161,412 @@ src_thrift - - - - - - - - - - - #### A masked pattern was here #### + + + + + + + + + #### A masked pattern was here #### + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + org.apache.hadoop.mapred.TextInputFormat - - 1 + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - #### A masked pattern was here #### - - - true - - - - - org.apache.hadoop.mapred.TextInputFormat + + + + hive.serialization.extend.nesting.levels + true - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + columns + _col0,_col1,_col2 - - - - hive.serialization.extend.nesting.levels - true - - - columns - _col0,_col1,_col2 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - int:string:string - - - escape.delim - \ - - + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - - 1 - - - - - FS_2 - - - - - - - - - - - - - - - - _col0 - - - - 
- - - - int - - - - - int - - + + serialization.format + 1 - - - - _col1 - - - - - - - - string - - - - - string - - + + columns.types + int:string:string - - - - _col2 - - - - - - - - - string - - + + escape.delim + \ + + 1 + - - - - - - _col2 - - + + FS_2 + + + + + + + + + - - - mstringstring + + + _col0 - src_thrift + - - - - + + + + int - - - + + int + - - - + + + _col1 - - key_2 + + - - - - - - - - - - - - - - _col1 - - - - - - - - - lintstring + + + + string - - src_thrift - - - - - - - - - myint - - - mystring - - - underscore_int - - - - - - - - - - - - - - - - - - - - - - - - - - - 0 - - + + string - - - - - - - - - - mystring - - - false - - - - - - - - _col0 - - - - - - lint + + + _col2 - src_thrift + - - - - - - + + - - - - - - + + string - - 1 - - - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - _col2 - - - - + + + + + _col2 + + + + + + + mstringstring + + + src_thrift + + + + + + + + + + + + + + + + + + + + key_2 + + + + + + + + + + + + + + + _col1 + + + + + + + + + lintstring + + + src_thrift + + + + + + + + + myint + + + mystring + + + underscore_int + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0 + + + + + + + + + + + + + + + mystring + + + false + + + + + + + + _col0 + + + + + + + lint + + + src_thrift + + + + + + + + + + + + + + + + + 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + _col0 + + + _col1 + + + _col2 + + + + + + + SEL_1 + + + + - - SEL_1 - - - - - - - - - - - - - - - - _c0 - - - _col0 - - - - - - int - - - - - - - mystring - - - _col1 - - - - - - string - - - - - - - _c2 - - - _col2 - - - - - - string - - - - - - - - - - + + + + + + + + + _c0 + + + _col0 + + + + + + int + + + + + + + mystring + + + _col1 + + + + + + string + + + + + + + _c2 + + + _col2 + + + + + + string + + + + + + + + + Index: ql/src/test/results/compiler/plan/input_testxpath2.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input_testxpath2.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/input_testxpath2.q.xml (working copy) @@ -161,140 +161,169 @@ src_thrift - - - - - - - - - + + + + + + + + + + + + #### A masked pattern was here #### + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + org.apache.hadoop.mapred.TextInputFormat + + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + + + hive.serialization.extend.nesting.levels + true + + + columns + _col0,_col1,_col2 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + + serialization.format + 1 + + + columns.types + int:int:int + + + escape.delim + \ + + + + + + + 1 + + + + + FS_3 + + - - - - - #### A masked pattern was here #### + + + + + + + + + + + _col0 + + + + + + + + int + + + + + int + + + + + + + _col1 + + + + + + + + + int + + + + + + + _col2 + + + + + + + + + int + + + + + + + + + + + + + + _col2 + + + + + + + mstringstring - - 1 + + src_thrift - - #### A masked pattern was here #### - - - true - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - hive.serialization.extend.nesting.levels - true - - - columns - _col0,_col1,_col2 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - int:int:int - - - escape.delim - \ - - - - - - - 1 - - - - - FS_3 - - - - - - - - - - - - - - - - _col0 - - - - - - - - int - - - + + + + - int + string - - - - _col1 - - - - - - - - - int - - + + - - - - _col2 - - 
- - - - - - - int - - - @@ -301,92 +330,55 @@ + + + + + + - - - - _col2 - - - - - - - mstringstring - - - src_thrift - - - - - - - string + + _col1 + + + + + + + lintstring + + + src_thrift + + + + + + + + + myint + + mystring + + + underscore_int + - - - - - - - - - - - - - - - - - - - _col1 - - - - - - - lintstring - - - src_thrift - - - - - - - - - myint - - - mystring - - - underscore_int - - + + + + - - - - - - - - - - - - + + + + + @@ -395,307 +387,303 @@ - - - - - - - - _col0 - - - - - - - lint + + + + + + + + + + _col0 + + + + + + + lint + + + src_thrift + + + + + - - src_thrift - - - - - - - - - - - - - - + + + + + + - - - - - - - - - - - - - - + + + + + + + + - - - - _col0 - - - _col1 - - - _col2 - - + + + + + - - SEL_2 - - + - + _col0 + + _col1 + + + _col2 + - - - - - - - - _c0 - - - _col0 - - - - - - int - - + + + + SEL_2 + + + + + + + + + + + + + + _c0 - - - - _c1 - - - _col1 - - - - - - int - - + + _col0 - - - - _c2 - - - _col2 - - - - - - int - - + + + + int + - - - - - - - - - - - - - - - - - - - lint - - - src_thrift - - - - - - - + + + _c1 - - + + _col1 - - - - boolean - - + + + + int + - - - - - - - - - - - mstringstring - - - src_thrift - - - - - - - - - - - - - - - - - + + + _c2 - - + + _col2 - - + + + + int + - - - - - - - - FIL_4 + + + + + + + + + + + + + + + lint + + + src_thrift + + + + + + + + + + + + + + + boolean + + + + + + + + + + + + + + + + + mstringstring + + + src_thrift + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + FIL_4 + + + + - - - - - - - - - - - - - - - lint - - - src_thrift - - - - - - array<int> - - - - - - - lintstring - - - src_thrift - - - - - - array<struct<myint:int,mystring:string,underscore_int:int>> - - - - - - - mstringstring - - - src_thrift - - - - - - map<string,string> - - - - - - - - - - + + + + + + + + + lint + + + src_thrift + + + + + + array<int> + + + + + + + lintstring + + + src_thrift + + + + + + array<struct<myint:int,mystring:string,underscore_int:int>> + + + + + + + mstringstring + + + src_thrift + + + + + + map<string,string> + + + + + + + + + Index: ql/src/test/results/compiler/plan/join1.q.xml =================================================================== --- ql/src/test/results/compiler/plan/join1.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/join1.q.xml (working copy) @@ -436,218 +436,214 @@ src2 - - - - - - - VALUE._col1 - - - value - - - src2 - - - - - string - - - - - - - VALUE._col0 - - - key - - - src2 - - - - - - - + + + + + + VALUE._col1 + + + value + + + src2 + + + + + string + + + + + + + VALUE._col0 + + + key + + + src2 + + + + + + + + + + + + + + + + + + + key + + + src2 + + + + + + + + + + + + org.apache.hadoop.mapred.SequenceFileInputFormat + + + org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + + + + + columns + joinkey0 + + + serialization.lib + org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe + + + serialization.sort.order + + + + + columns.types + string + + + + + + + 1 + + + -1 + + + + + reducesinkkey0 + + + + + + + _col1 + + + + + + + + 1 + + + + + + + + + + + + org.apache.hadoop.mapred.SequenceFileInputFormat + + + org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + + + + + columns + _col1 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + + columns.types + string + + + escape.delim + \ + + + + + + + + + src2 + + + RS_3 + + + + - - - - - - - - - - - key - - - src2 - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - joinkey0 - - - serialization.lib - org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe - - - serialization.sort.order - + - - - columns.types - string - - - - - - - 1 - - - -1 - - - - - reducesinkkey0 - - - - - - - _col1 - - - - - - - - 1 - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - _col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - string - - - escape.delim - \ - - - - - - - - - src2 - - - RS_3 - - - - - - - - - - - - - - - - VALUE._col0 - - - src2 - - - - - - string - - - - - - - VALUE._col1 - - - src2 - - - - - - string - - - - - - - - - - + + + + + + + + + VALUE._col0 + + + src2 + + + + + + string + + + + + + + VALUE._col1 + + + src2 + + + + + + string + + + + + + + + + @@ -770,181 +766,177 @@ src1 - - - - - - - VALUE._col0 - - - key - - - src1 - - - - - - - + + + + + + VALUE._col0 + + + key + + + src1 + + + + + + + + + + + + + + + + + + + key + + + src1 + + + + + + + + + + + + org.apache.hadoop.mapred.SequenceFileInputFormat + + + org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + + + + + columns + joinkey0 + + + serialization.lib + org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe + + + serialization.sort.order + + + + + columns.types + string + + + + + + + 1 + + + -1 + + + + + reducesinkkey0 + + + + + + + _col0 + + + + + + + + + + + + + + + + + org.apache.hadoop.mapred.SequenceFileInputFormat + + + org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + + + + + columns + _col0 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + + columns.types + string + + + escape.delim + \ + + + + + + + + + src1 + + + RS_2 + + + + - - - - - - - - - - - key - - - src1 - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - joinkey0 - - - serialization.lib - org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe - - - serialization.sort.order - + - - - columns.types - string - - - - - - - 1 - - - -1 - - - - - reducesinkkey0 - - - - - - - _col0 - - - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - _col0 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - string - - - escape.delim - \ - - - - - - - - - src1 - - - RS_2 - - - - - - - - - - - - - - - - VALUE._col0 - - - src1 - - - - - - string - - - - - - - - - - + + + + + + + + + VALUE._col0 + + + src1 + + + + + + string + + + + + + + + + @@ -1253,210 +1245,202 @@ - - - - - - - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 150 - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - + + + + + + + + + 1 - - FS_6 + + #### A masked pattern was here #### - - - - - - + + true - - - - - - - - key - - - - - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - + + - - - - - - - - _col1 - - - _col5 + + 150 - - src2 + + 1 - - + + #### A masked pattern was here #### - - - - _col0 - - - _col0 + + true - - src1 + + - - + + 1 - - - - - - - - - - - - - + + FS_6 - - - - _col0 - - - _col1 - - + + + + - - - - SEL_5 - - - - - - - - - - - - - - - - _col0 + + + + + + + + key + + + + + + + + + string + + - - src1 + 
+ + + value + + + + + + + + + string + + - - - - - string - - - - - _col1 - - - src2 - - - - - - string - - - - - - + + + + + _col1 + + + _col5 + + + src2 + + + + + + + + _col0 + + + _col0 + + + src1 + + + + + + + + + + + + + + + + + + + + + + + + _col0 + + + _col1 + + + + + + + SEL_5 + + + + + + + + + + + + + + _col0 + + + src1 + + + + + + string + + + + + + + _col1 + + + src2 + + + + + + string + + + + + + + + + @@ -1606,14 +1590,12 @@ JOIN_4 - - - - - - - - + + + + + + Index: ql/src/test/results/compiler/plan/join2.q.xml =================================================================== --- ql/src/test/results/compiler/plan/join2.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/join2.q.xml (working copy) @@ -311,213 +311,209 @@ $INTNAME - - - - - - - VALUE._col4 - - - _col0 - - - src1 - - - - - string - - - - - - + + + + + + VALUE._col4 + + + _col0 + + + src1 + + + + + string + + + + + + + + + + + + + + + + + + + + + + _col0 + + + src1 + + + + + + + + + + _col4 + + + src2 + + + + + + + + + + + + + + + double + + + + + + + + + + + org.apache.hadoop.mapred.SequenceFileInputFormat + + + org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + + + + + columns + joinkey0 + + + serialization.lib + org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe + + + serialization.sort.order + + + + + columns.types + double + + + + + + + 1 + + + -1 + + + + + reducesinkkey0 + + + + + + + _col4 + + + + + + + + + + + + + + + + + org.apache.hadoop.mapred.SequenceFileInputFormat + + + org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + + + + + columns + _col4 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + + columns.types + string + + + escape.delim + \ + + + + + + + + + RS_6 + + + + - - - - - - - - - - - - - - - _col0 - - - src1 - - - - - - - - - - _col4 - - - src2 - - - - - - - - - - - - - - - double - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - joinkey0 - - - serialization.lib - org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe - - - serialization.sort.order - + - - - columns.types - double - - - - - - - 1 - - - -1 - - - - - reducesinkkey0 - - - - - - - _col4 - - - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - _col4 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - string - - - escape.delim - \ - - - - - - - - - RS_6 - - - - - - - - - - - - - - - - VALUE._col4 - - - src1 - - - - - - string - - - - - - - - - - + + + + + + + + + VALUE._col4 + + + src1 + + + + + + string + + + + + + + + + @@ -591,205 +587,201 @@ src3 - - - - - - - VALUE._col1 - - - value - - - src3 - - - - - - - + + + + + + VALUE._col1 + + + value + + + src3 + + + + + + + + + + + + + + + + + + + + + + + key + + + src3 + + + + + + + + + + + + org.apache.hadoop.hive.ql.udf.UDFToDouble + + + UDFToDouble + + + + + + + + + + + + + + org.apache.hadoop.mapred.SequenceFileInputFormat + + + org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + + + + + columns + joinkey0 + + + serialization.lib + org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe + + + serialization.sort.order + + + + + columns.types + double + + + + + + + 1 + + + -1 + + + + + reducesinkkey0 + + + + + + + _col1 + + + + + + + + 1 + + + + + + + + + + + + org.apache.hadoop.mapred.SequenceFileInputFormat + 
+ + org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + + + + + columns + _col1 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + + columns.types + string + + + escape.delim + \ + + + + + + + + + src3 + + + RS_7 + + + + - - - - - - - - - - - - - - - key - - - src3 - - - - - - - - - - - - org.apache.hadoop.hive.ql.udf.UDFToDouble - - - UDFToDouble - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - joinkey0 - - - serialization.lib - org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe - - - serialization.sort.order - + - - - columns.types - double - - - - - - - 1 - - - -1 - - - - - reducesinkkey0 - - - - - - - _col1 - - - - - - - - 1 - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - _col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - string - - - escape.delim - \ - - - - - - - - - src3 - - - RS_7 - - - - - - - - - - - - - - - - VALUE._col1 - - - src3 - - - - - - string - - - - - - - - - - + + + + + + + + + VALUE._col1 + + + src3 + + + + + + string + + + + + + + + + @@ -1160,210 +1152,202 @@ - - - - - - - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 150 - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - + + + + + + + + + 1 - - FS_10 + + #### A masked pattern was here #### - - - - - - + + true - - - - - - - - key - - - - - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - + + - - - - - - - - _col1 - - - _col9 + + 150 - - src3 + + 1 - - + + #### A masked pattern was here #### - - - - _col0 - - - _col4 + + true - - src1 + + - - + + 1 - - - - - - - - - - - - - + + FS_10 - - - - _col0 - - - _col1 - - + + + + - - - - SEL_9 - - - - - - - - - - - - - - - - _col0 + + + + + + + + key + + + + + + + + + string + + - - src1 + + + + value + + + + + + + + + string + + - - - - - string - - - - - _col1 - - - src3 - - - - - - string - - - - - - + + + + + _col1 + + + _col9 + + + src3 + + + + + + + + _col0 + + + _col4 + + + src1 + + + + + + + + + + + + + + + + + + + + + + + + _col0 + + + _col1 + + + + + + + SEL_9 + + + + + + + + + + + + + + _col0 + + + src1 + + + + + + string + + + + + + + _col1 + + + src3 + + + + + + string + + + + + + + + + @@ -1529,14 +1513,12 @@ JOIN_8 - - - - - - - - + + + + + + @@ -1904,184 +1886,180 @@ src2 - - - - - - - VALUE._col0 - - - key - - - src2 - - - - - - - + + + + + + VALUE._col0 + + + key + + + src2 + + + + + + + + + + + + + + + + + + + key + + + src2 + + + + + + + + + + + + org.apache.hadoop.mapred.SequenceFileInputFormat + + + org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + + + + + columns + joinkey0 + + + serialization.lib + org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe + + + serialization.sort.order + + + + + columns.types + string + + + + + + + 1 + + + -1 + + + + + reducesinkkey0 + + + + + + + _col0 + + + + + + + + 1 + + + + + + + + + + + + org.apache.hadoop.mapred.SequenceFileInputFormat + + + org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + + + + + columns + _col0 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + + columns.types + string + + + escape.delim + \ + + + + + + + + + src2 + + + RS_4 + + + + - - - - - - - - - - - key - - - src2 - - - - - - - - - - - - 
[continuation of the preceding plan-XML hunk: re-serialized src2 ReduceSinkOperator (RS_4) value descriptors with their SequenceFileInputFormat / BinarySortableSerDe / LazyBinarySerDe table properties]
@@ -2194,181 +2172,177 @@
@@ -2671,41 +2645,37 @@
@@ -2855,14 +2825,12 @@
[hunk bodies: re-serialized src1 ReduceSinkOperator (RS_3), FileSinkOperator (FS_11), and JoinOperator (JOIN_5) descriptors]
Index: ql/src/test/results/compiler/plan/join3.q.xml
===================================================================
--- ql/src/test/results/compiler/plan/join3.q.xml (revision 1553449)
+++ ql/src/test/results/compiler/plan/join3.q.xml (working copy)
@@ -572,180 +572,176 @@
@@ -862,214 +858,210 @@
@@ -1188,181 +1180,177 @@
@@ -1674,210 +1662,202 @@
@@ -2061,17 +2041,15 @@
[hunk bodies: re-serialized ReduceSinkOperator (RS_3, RS_4, RS_5), SelectOperator (SEL_7), FileSinkOperator (FS_8), and JoinOperator (JOIN_6) descriptors for src1, src2, and src3, including their SequenceFile / BinarySortableSerDe / LazyBinarySerDe table properties]
Index: ql/src/test/results/compiler/plan/join4.q.xml
===================================================================
--- ql/src/test/results/compiler/plan/join4.q.xml (revision 1553449)
+++ ql/src/test/results/compiler/plan/join4.q.xml (working copy)
@@ -289,478 +289,466 @@
@@ -857,469 +845,457 @@
@@ -1602,349 +1578,341 @@
@@ -2126,14 +2094,12 @@
[hunk bodies: re-serialized ReduceSinkOperator (RS_6, RS_7), SelectOperator (SEL_2, SEL_5, SEL_9), FilterOperator (FIL_12, FIL_13), FileSinkOperator (FS_11), and JoinOperator (JOIN_8) descriptors for the c:a:src1 and c:b:src2 subqueries]
Index: ql/src/test/results/compiler/plan/join5.q.xml
===================================================================
--- ql/src/test/results/compiler/plan/join5.q.xml (revision 1553449)
+++ ql/src/test/results/compiler/plan/join5.q.xml (working copy)
@@ -289,478 +289,466 @@
@@ -857,469 +845,457 @@
@@ -1602,349 +1578,341 @@
@@ -2122,14 +2090,12 @@
[hunk bodies: re-serialized ReduceSinkOperator (RS_6, RS_7), SelectOperator (SEL_2, SEL_5, SEL_9), FilterOperator (FIL_12, FIL_13), FileSinkOperator (FS_11), and JoinOperator (JOIN_8) descriptors for c:a:src1 and c:b:src2]
Index: ql/src/test/results/compiler/plan/join6.q.xml
===================================================================
--- ql/src/test/results/compiler/plan/join6.q.xml (revision 1553449)
+++ ql/src/test/results/compiler/plan/join6.q.xml (working copy)
@@ -289,478 +289,466 @@
@@ -857,469 +845,457 @@
@@ -1602,349 +1578,341 @@
@@ -2129,14 +2097,12 @@
[hunk bodies: re-serialized ReduceSinkOperator (RS_6, RS_7), SelectOperator (SEL_2, SEL_5, SEL_9), FilterOperator (FIL_12, FIL_13), FileSinkOperator (FS_11), and JoinOperator (JOIN_8) descriptors for c:a:src1 and c:b:src2]
Index: ql/src/test/results/compiler/plan/join7.q.xml
===================================================================
--- ql/src/test/results/compiler/plan/join7.q.xml (revision 1553449)
+++ ql/src/test/results/compiler/plan/join7.q.xml (working copy)
@@ -425,478 +425,466 @@
@@ -993,469 +981,457 @@
@@ -1548,469 +1524,457 @@
@@ -2296,459 +2260,451 @@
@@ -3006,17 +2962,15 @@
[hunk bodies: re-serialized ReduceSinkOperator (RS_9, RS_10, RS_11), SelectOperator (SEL_2, SEL_5, SEL_8, SEL_13), FilterOperator (FIL_16, FIL_17, FIL_18), FileSinkOperator (FS_15), and JoinOperator (JOIN_12) descriptors for c:a:src1, c:b:src2, and c:c:src3]
Index: ql/src/test/results/compiler/plan/join8.q.xml
===================================================================
--- ql/src/test/results/compiler/plan/join8.q.xml (revision 1553449)
+++ ql/src/test/results/compiler/plan/join8.q.xml (working copy)
@@ -289,519 +289,507 @@
@@ -898,510 +886,498 @@
@@ -1684,450 +1660,337 @@
@@ -2135,9 +1998,110 @@
@@ -2319,14 +2283,12 @@
[hunk bodies: re-serialized ReduceSinkOperator (RS_6, RS_7), SelectOperator (SEL_2, SEL_5, SEL_9), FilterOperator (FIL_13, FIL_14, FIL_15), FileSinkOperator (FS_12), and JoinOperator (JOIN_8) descriptors for c:a:src1 and c:b:src2]
Index: ql/src/test/results/compiler/plan/sample1.q.xml
===================================================================
--- ql/src/test/results/compiler/plan/sample1.q.xml (revision 1553449)
+++ ql/src/test/results/compiler/plan/sample1.q.xml (working copy)
@@ -174,577 +174,350 @@
@@ -752,9 +525,224 @@
[hunk bodies: re-serialized FileSinkOperator (FS_4), SelectOperator (SEL_3), and sampling FilterOperator (FIL_1) descriptors for table s]
Index: ql/src/test/results/compiler/plan/sample2.q.xml
===================================================================
--- ql/src/test/results/compiler/plan/sample2.q.xml (revision 1553449)
+++ ql/src/test/results/compiler/plan/sample2.q.xml (working copy)
@@ -79,137 +79,133 @@
@@ -652,361 +648,349 @@
[hunk bodies: re-serialized FileSinkOperator (FS_5, FS_3), SelectOperator (SEL_2), and sampling FilterOperator (FIL_1) descriptors for default.dest1 and the sampled source table s]
Index: ql/src/test/results/compiler/plan/sample3.q.xml
===================================================================
--- ql/src/test/results/compiler/plan/sample3.q.xml (revision 1553449)
+++ ql/src/test/results/compiler/plan/sample3.q.xml (working copy)
@@ -79,137 +79,133 @@
@@ -652,371 +648,359 @@
[hunk bodies: re-serialized FileSinkOperator (FS_5, FS_3), SelectOperator (SEL_2), and sampling FilterOperator (FIL_1) descriptors for default.dest1 and the sampled source table s]
Index: ql/src/test/results/compiler/plan/sample4.q.xml
===================================================================
--- ql/src/test/results/compiler/plan/sample4.q.xml (revision 1553449)
+++ ql/src/test/results/compiler/plan/sample4.q.xml (working copy)
@@ -79,137 +79,133 @@
[hunk body: re-serialized masked FileSinkOperator descriptor]
org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - location - #### A masked pattern was here #### - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - 1 - - + + + + + + #### A masked pattern was here #### + + + 1 + + + + + org.apache.hadoop.mapred.TextInputFormat + + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + + + name + default.dest1 + + + columns.types + string:string + + + serialization.ddl + struct dest1 { string key, string value} + + + serialization.format + 1 + + + columns + key,value + + + bucket_count + -1 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + + file.inputformat + org.apache.hadoop.mapred.TextInputFormat + + + location + #### A masked pattern was here #### + + + file.outputformat + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + transient_lastDdlTime + #### A masked pattern was here #### + + + + + + + 1 + + + + + FS_5 + + + + - - FS_5 - - - - - - - - - - - - - - - - key - - - - - - - - string - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + + + + + + string + + + + + string + + + + + + + value + + + + + + + + + string + + + + + + + + + @@ -652,361 +648,349 @@ s - - - - - - - - - + + + + + + + + + + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + + 150 + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + + 1 + + + + + FS_3 + + - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 150 - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_3 - - - - - - - - - - - - + - - - - - - _col1 - - - value - - - s - - - - - + + + + + + + + + + + _col1 + + + value - - _col0 - - - key - - - s - - - - - int - - - - + + s + + + - - - - - - - - - - - + + _col0 + + + key - - - - _col0 + + s + + + + + int - - _col1 - - - true - - - SEL_2 - - + + + + + - + + + + - - - - - - - - _col0 - - - s - - - - - - int - - - - - - - _col1 - - - s - - - - - - string - - - - + + + + _col0 + + _col1 + + + true + - - - - - - true + + SEL_2 - - - + + + + + + + + - - - - - - - - - - - - - - - key - - - s - - - - - - - - - - - - - - - - - - - - - - - 2147483647 - - - - - - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFOPBitAnd - - - & - - - - - - - - - - - - - - - 2 - - - - + + + _col0 - - + + s - + + + int + - - - + + + _col1 - - 0 + + s + + + + + string + - - - - - - - boolean - - - - - - - - FIL_1 + + + + + true + + + + + + + + + + + + + + + + + + + + + key + + + s + + + + + + + + + + + + + + + + + + + + + + + 2147483647 + + + + + + + + + true + + + org.apache.hadoop.hive.ql.udf.UDFOPBitAnd + + + & + + + + + + + + + + + + + + + 2 + + + + + + + + + + + + + + + + + + + + 0 + + + + + + + + + + + + boolean + + + + + + + + + + + + FIL_1 + + + + - - - - - - - - - - - - - - - key - - - s - - - - - - int - - - - - - - value - - - s - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + s + + + + + + int + + + + + + + value + + + s + + + + + + string + + 
+ + + + + + + Index: ql/src/test/results/compiler/plan/sample5.q.xml =================================================================== --- ql/src/test/results/compiler/plan/sample5.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/sample5.q.xml (working copy) @@ -79,137 +79,133 @@ #### A masked pattern was here #### - - - - - - - #### A masked pattern was here #### - - - 1 - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - location - #### A masked pattern was here #### - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - 1 - - + + + + + + #### A masked pattern was here #### + + + 1 + + + + + org.apache.hadoop.mapred.TextInputFormat + + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + + + name + default.dest1 + + + columns.types + string:string + + + serialization.ddl + struct dest1 { string key, string value} + + + serialization.format + 1 + + + columns + key,value + + + bucket_count + -1 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + + file.inputformat + org.apache.hadoop.mapred.TextInputFormat + + + location + #### A masked pattern was here #### + + + file.outputformat + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + transient_lastDdlTime + #### A masked pattern was here #### + + + + + + + 1 + + + + + FS_5 + + + + - - FS_5 - - - - - - - - - - - - - - - - key - - - - - - - - string - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + + + + + + string + + + + + string + + + + + + + value + + + + + + + + + string + + + + + + + + + @@ -652,358 +648,346 @@ s - - - - - - - - - + + + + + + + + + + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + + 150 + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + + 1 + + + + + FS_3 + + - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 150 - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_3 - - - - - - - - - - - - + - - - - - - _col1 - - - value - - - s - - - - - + + + + + + + + + + + _col1 + + + value - - _col0 - - - key - - - s - - - - - int - - - - + + s + + + - - - - - - - - - - - + + _col0 + + + key - - - - _col0 + + s + + + + + int - - _col1 - - - true - - - SEL_2 - - + + + + + - + + + + - - - - - - - - _col0 - - - s - - - - - - int - - - - - - - _col1 - - - s - - - - - - string - - - - + + + + _col0 + + _col1 + + + true + - - - - - - true + + SEL_2 - - - + + + + + + + + - - - - - - - - - - - - - - - key - - - s - - - - - - - - - - - - - - - - - - - - - - - 2147483647 - - - - - - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFOPBitAnd - - - & - - - - - - - - - - - - - - - 5 - - - - + + + _col0 - - + + s - + + + int + - - - + + + _col1 - - 0 + + s + + + + + string + - - - - - - - boolean - - - - - FIL_1 + + + + + true + + + + + + + + + + + + + + + + + + + + + key + + + s + + + + + + + + + + + + + + + + + + + + + + + 2147483647 + + + + + + + + + true + + + 
org.apache.hadoop.hive.ql.udf.UDFOPBitAnd + + + & + + + + + + + + + + + + + + + 5 + + + + + + + + + + + + + + + + + + + + 0 + + + + + + + + + + + + boolean + + + + + + + + + FIL_1 + + + + - - - - - - - - - - - - - - - key - - - s - - - - - - int - - - - - - - value - - - s - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + s + + + + + + int + + + + + + + value + + + s + + + + + + string + + + + + + + + + Index: ql/src/test/results/compiler/plan/sample6.q.xml =================================================================== --- ql/src/test/results/compiler/plan/sample6.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/sample6.q.xml (working copy) @@ -79,137 +79,133 @@ #### A masked pattern was here #### - - - - - - - #### A masked pattern was here #### - - - 1 - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - location - #### A masked pattern was here #### - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - 1 - - + + + + + + #### A masked pattern was here #### + + + 1 + + + + + org.apache.hadoop.mapred.TextInputFormat + + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + + + name + default.dest1 + + + columns.types + string:string + + + serialization.ddl + struct dest1 { string key, string value} + + + serialization.format + 1 + + + columns + key,value + + + bucket_count + -1 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + + file.inputformat + org.apache.hadoop.mapred.TextInputFormat + + + location + #### A masked pattern was here #### + + + file.outputformat + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + transient_lastDdlTime + #### A masked pattern was here #### + + + + + + + 1 + + + + + FS_5 + + + + - - FS_5 - - - - - - - - - - - - - - - - key - - - - - - - - string - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + + + + + + string + + + + + string + + + + + + + value + + + + + + + + + string + + + + + + + + + @@ -652,361 +648,349 @@ s - - - - - - - - - + + + + + + + + + + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + + 150 + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + + 1 + + + + + FS_3 + + - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 150 - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_3 - - - - - - - - - - - - + - - - - - - _col1 - - - value - - - s - - - - - + + + + + + + + + + + _col1 + + + value - - _col0 - - - key - - - s - - - - - int - - - - + + s + + + - - - - - - - - - - - + + _col0 + + + key - - - - _col0 + + s + + + + + int - - _col1 - - - true - - - SEL_2 - - + + + + + - + + + + - - - - - - - - _col0 - - - s - - - - - - int - - - - - - - _col1 - - - s - - - - - - string - - - - + + + + _col0 + + _col1 + + + true + - - - - - - true + + SEL_2 - - - + + + + + + + + - - - - - - - - - - - - - - - key - - - s - - - - - - - - - - - - - - - - - - - - - - - 2147483647 - - - - 
- - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFOPBitAnd - - - & - - - - - - - - - - - - - - - 4 - - - - + + + _col0 - - + + s - + + + int + - - - + + + _col1 - - 0 + + s + + + + + string + - - - - - - - boolean - - - - - - - - FIL_1 + + + + + true + + + + + + + + + + + + + + + + + + + + + key + + + s + + + + + + + + + + + + + + + + + + + + + + + 2147483647 + + + + + + + + + true + + + org.apache.hadoop.hive.ql.udf.UDFOPBitAnd + + + & + + + + + + + + + + + + + + + 4 + + + + + + + + + + + + + + + + + + + + 0 + + + + + + + + + + + + boolean + + + + + + + + + + + + FIL_1 + + + + - - - - - - - - - - - - - - - key - - - s - - - - - - int - - - - - - - value - - - s - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + s + + + + + + int + + + + + + + value + + + s + + + + + + string + + + + + + + + + Index: ql/src/test/results/compiler/plan/sample7.q.xml =================================================================== --- ql/src/test/results/compiler/plan/sample7.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/sample7.q.xml (working copy) @@ -79,137 +79,133 @@ #### A masked pattern was here #### - - - - - - - #### A masked pattern was here #### - - - 1 - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - location - #### A masked pattern was here #### - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - 1 - - + + + + + + #### A masked pattern was here #### + + + 1 + + + + + org.apache.hadoop.mapred.TextInputFormat + + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + + + name + default.dest1 + + + columns.types + string:string + + + serialization.ddl + struct dest1 { string key, string value} + + + serialization.format + 1 + + + columns + key,value + + + bucket_count + -1 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + + file.inputformat + org.apache.hadoop.mapred.TextInputFormat + + + location + #### A masked pattern was here #### + + + file.outputformat + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + transient_lastDdlTime + #### A masked pattern was here #### + + + + + + + 1 + + + + + FS_7 + + + + - - FS_7 - - - - - - - - - - - - - - - - key - - - - - - - - string - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + + + + + + string + + + + + string + + + + + + + value + + + + + + + + + string + + + + + + + + + @@ -652,406 +648,394 @@ s - - - - - - - - - + + + + + + + + + + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + + 150 + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + + 1 + + + + + FS_4 + + - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 150 - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_4 - - - - - - - - - - - - + - - - - - - _col1 - - - value - - - s - - - - - + + + + + + + + + + + _col1 + + + value - - _col0 - - - key - - - s - - - - - int - - - - + + s + + + - - - - - - - - - - - + + 
_col0 + + + key - - - - _col0 + + s + + + + + int - - _col1 - - - true - - - SEL_3 - - + + + + + - + + + + - - - - - - - - _col0 - - - s - - - - - - int - - - - - - - _col1 - - - s - - - - - - string - - - - + + + + _col0 + + _col1 + + + true + - - - - - - - + + SEL_3 + + + + + + + + + - - - - - - - - - - - - - - - - - - - key - - - s - - - - - - - - - - - - - - - - - - - - - - - 2147483647 - - - - - - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFOPBitAnd - - - & - - - - - - - - - - - - - - - 4 - - - - - - - - - - - - - - - - - - - - 0 - - - - + + + _col0 - - + + s - - - - boolean - - + + + + int + - - - - - - - key - - - s - - - - - - - - - - - - - 100 - - - - + + + _col1 - - + + s - - + + + + string + - - - - - - - - FIL_5 + + + + + + + + + + + + + + + + + + + + + + + + + + + key + + + s + + + + + + + + + + + + + + + + + + + + + + + 2147483647 + + + + + + + + + true + + + org.apache.hadoop.hive.ql.udf.UDFOPBitAnd + + + & + + + + + + + + + + + + + + + 4 + + + + + + + + + + + + + + + + + + + + 0 + + + + + + + + + + + + boolean + + + + + + + + + + + + + key + + + s + + + + + + + + + + + + + 100 + + + + + + + + + + + + + + + + + + + + + + + + + + + FIL_5 + + + + - - - - - - - - - - - - - - - key - - - s - - - - - - int - - - - - - - value - - - s - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + s + + + + + + int + + + + + + + value + + + s + + + + + + string + + + + + + + + + Index: ql/src/test/results/compiler/plan/subq.q.xml =================================================================== --- ql/src/test/results/compiler/plan/subq.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/subq.q.xml (working copy) @@ -44,109 +44,105 @@ #### A masked pattern was here #### - - - - - - - #### A masked pattern was here #### - - - 1 - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - columns - _col0,_col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - string:string - - - - - - - 1 - - + + + + + + #### A masked pattern was here #### + + + 1 + + + + + org.apache.hadoop.mapred.TextInputFormat + + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + + + columns + _col0,_col1 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + + serialization.format + 1 + + + columns.types + string:string + + + + + + + 1 + + + + + FS_7 + + + + - - FS_7 - - - - - - - - - - - - - - - - _col0 - - - - - - - - string - - - - - string - - - - - - - _col1 - - - - - - - - - string - - - - - - - - - - + + + + + + + + + _col0 + + + + + + + + string + + + + + string + + + + + + + _col1 + + + + + + + + + string + + + + + + + + + @@ -531,280 +527,268 @@ unioninput:src - - - - - - - - - + + + + + + + + + + + + 1 + + + #### A masked pattern was here #### + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + + 1 + + + + + FS_4 + + - - - - - 1 - - - #### A masked pattern was here #### - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_4 - - - - - - - - - - - - + - - - - - - _col1 - - - value - - - src - - - - - + + + + + + + + + + + _col1 + + + value - - _col0 - - - key - - - src - - - - - + + src + + + - - - - - - - - - - - + + _col0 + + + key - - - - _col0 - - - _col1 - - + + src - - true + + - - SEL_2 - - + + + + + - + + + + - - - - - - - - key - - - _col0 - - - src - - - - - - string - - - - - - - value - - - _col1 - - - src - - - - - - string - - - 
- + + + + _col0 + + _col1 + + + true + - - - - - - - + + SEL_2 + + + + + + + + + - - + + key + + _col0 + src - + + + string + - - - - - int - - + + + value - - 100 + + _col1 + + src + + + + + + string + - - - - - - - boolean - - - - - FIL_5 + + + + + + + + + + + key + + + src + + + + + + + + + + + + int + + + + + 100 + + + + + + + + + + + + boolean + + + + + + + + + FIL_5 + + + + - - - - - - - - - - - - - - - key - - - src - - - - - - string - - - - - - - value - - - src - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + src + + + + + + string + + + + + + + value + + + src + + + + + + string + + + + + + + + + Index: ql/src/test/results/compiler/plan/udf1.q.xml =================================================================== --- ql/src/test/results/compiler/plan/udf1.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/udf1.q.xml (working copy) @@ -153,1663 +153,1651 @@ src - - - - - - - - - + + + + + + + + + + + + #### A masked pattern was here #### + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + org.apache.hadoop.mapred.TextInputFormat + + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + + + hive.serialization.extend.nesting.levels + true + + + columns + _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11,_col12,_col13,_col14,_col15,_col16 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + + serialization.format + 1 + + + columns.types + boolean:boolean:boolean:boolean:boolean:boolean:boolean:boolean:boolean:boolean:boolean:boolean:boolean:string:string:string:string + + + escape.delim + \ + + + + + + + 1 + + + + + FS_3 + + - - - - - #### A masked pattern was here #### + + + + + + + + + + + _col0 + + + + + + + + boolean + + + + + boolean + + + + + + + _col1 + + + + + + + + + boolean + + + + + + + _col2 + + + + + + + + + boolean + + + + + + + _col3 + + + + + + + + + boolean + + + + + + + _col4 + + + + + + + + + boolean + + + + + + + _col5 + + + + + + + + + boolean + + + + + + + _col6 + + + + + + + + + boolean + + + + + + + _col7 + + + + + + + + + boolean + + + + + + + _col8 + + + + + + + + + boolean + + + + + + + _col9 + + + + + + + + + boolean + + + + + + + _col10 + + + + + + + + + boolean + + + + + + + _col11 + + + + + + + + + boolean + + + + + + + _col12 + + + + + + + + + boolean + + + + + + + _col13 + + + + + + + + string + + + + + string + + + + + + + _col14 + + + + + + + + + string + + + + + + + _col15 + + + + + + + + + string + + + + + + + _col16 + + + + + + + + + string + + + + + + + + + + + + + + _col8 + + + + + + + - - 1 + + - - #### A masked pattern was here #### + + + + + + - - true + + .* - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - hive.serialization.extend.nesting.levels - true - - - columns - _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11,_col12,_col13,_col14,_col15,_col16 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - boolean:boolean:boolean:boolean:boolean:boolean:boolean:boolean:boolean:boolean:boolean:boolean:boolean:string:string:string:string - - - escape.delim - \ - - - - - - - 1 - - - FS_3 + + + + + + true - - - - - - + + org.apache.hadoop.hive.ql.udf.UDFRegExp - - - - - - - - _col0 - - - - - - - - boolean - - - - - boolean - - - - - - - _col1 - - - - - - - - - boolean - - - - - - - _col2 - - - - - - - - - boolean - - - - - - - _col3 - - - - - - - - - boolean - 
- - - - - - _col4 - - - - - - - - - boolean - - - - - - - _col5 - - - - - - - - - boolean - - - - - - - _col6 - - - - - - - - - boolean - - - - - - - _col7 - - - - - - - - - boolean - - - - - - - _col8 - - - - - - - - - boolean - - - - - - - _col9 - - - - - - - - - boolean - - - - - - - _col10 - - - - - - - - - boolean - - - - - - - _col11 - - - - - - - - - boolean - - - - - - - _col12 - - - - - - - - - boolean - - - - - - - _col13 - - - - - - - - string - - - - - string - - - - - - - _col14 - - - - - - - - - string - - - - - - - _col15 - - - - - - - - - string - - - - - - - _col16 - - - - - - - - - string - - - - - - + + rlike + + + - - - - _col8 - - - - - - - - - - - - + + _col7 + + + + + + + - - - - - - - .* - - + + ab - - - - true + + + + - - org.apache.hadoop.hive.ql.udf.UDFRegExp + + a - - rlike - - - - - - _col7 - - - - - - - - - - ab - - - - - - - - - - a - - - - + + + + true - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFLike - - - like - - + + org.apache.hadoop.hive.ql.udf.UDFLike - - + + like - - _col6 - - - - - - - - - - ab - - + + + + + + + _col6 + + + + + + + - - - - - - - _a% - - + + ab - - - - true + + + + - - org.apache.hadoop.hive.ql.udf.UDFLike + + _a% - - like - - - + + + + + + true + + org.apache.hadoop.hive.ql.udf.UDFLike + + + like + - - _col5 - - - - - - - - - - ab - - + + + + + + + _col5 + + + + + + + - - - - - - - \%\_ - - + + ab - - - - true + + + + - - org.apache.hadoop.hive.ql.udf.UDFLike + + \%\_ - - like - - - + + + + + + true + + org.apache.hadoop.hive.ql.udf.UDFLike + + + like + - - _col4 - - - - - - - - - - %_ - - + + + + + + + _col4 + + + + + + + - - - - - - - \%\_ - - + + %_ - - - - true + + + + - - org.apache.hadoop.hive.ql.udf.UDFLike + + \%\_ - - like - - - + + + + + + true + + org.apache.hadoop.hive.ql.udf.UDFLike + + + like + - - _col3 - - - - - - - - - - ab - - + + + + + + + _col3 + + + + + + + - - - - - - - %a_ - - + + ab - - - - true + + + + - - org.apache.hadoop.hive.ql.udf.UDFLike + + %a_ - - like - - - + + + + + + true + + org.apache.hadoop.hive.ql.udf.UDFLike + + + like + - - _col2 - - - - - - - - - - ab - - + + + + + + + _col2 + + + + + + + - - - - - - - %a% - - + + ab - - - - true + + + + - - org.apache.hadoop.hive.ql.udf.UDFLike + + %a% - - like - - - + + + + + + true + + org.apache.hadoop.hive.ql.udf.UDFLike + + + like + - - _col1 - - - - - - - - - - b - - + + + + + + + _col1 + + + + + + + - - - - - - - %a% - - + + b - - - - true + + + + - - org.apache.hadoop.hive.ql.udf.UDFLike + + %a% - - like - - - + + + + + + true + + org.apache.hadoop.hive.ql.udf.UDFLike + + + like + - - _col9 - - - - - - - - - - a - - + + + + + + + _col9 + + + + + + + - - - - - - - [ab] - - + + a - - - - true + + + + - - org.apache.hadoop.hive.ql.udf.UDFRegExp + + [ab] - - rlike - - - + + + + + + true + + org.apache.hadoop.hive.ql.udf.UDFRegExp + + + rlike + - - _col13 - - - - - - - - - - abc - - + + + + + + + _col13 + + + + + + + - - - - - - - b - - + + abc - - - - - - - c - - + + + + + + + + b + - - - - org.apache.hadoop.hive.ql.udf.UDFRegExpReplace + + + + - - regexp_replace + + c - - + + + + + + org.apache.hadoop.hive.ql.udf.UDFRegExpReplace + + regexp_replace + - - _col12 - - - - - - - - - - hadoop - - + + + + + + + _col12 + + + + + + + - - - - - - - o* - - + + hadoop - - - - true + + + + - - org.apache.hadoop.hive.ql.udf.UDFRegExp + + o* - - rlike - - - + + + + + + true + + org.apache.hadoop.hive.ql.udf.UDFRegExp + + + rlike + - - _col11 - - - - - - - - - - hadoop - - + + + + + + + _col11 + + + + + + + - - - - - - - [a-z]* - - + + hadoop - - - - true + + 
+ + - - org.apache.hadoop.hive.ql.udf.UDFRegExp + + [a-z]* - - rlike - - - + + + + + + true + + org.apache.hadoop.hive.ql.udf.UDFRegExp + + + rlike + - - _col10 - - - - - - - - - - - - + + + + + + + _col10 + + + + + + + - - - - - - - [ab] - - + + - - - - true + + + + - - org.apache.hadoop.hive.ql.udf.UDFRegExp + + [ab] - - rlike - - - + + + + + + true + + org.apache.hadoop.hive.ql.udf.UDFRegExp + + + rlike + - - _col16 - - - - - - - - - - hadoop - - + + + + + + + _col16 + + + + + + + - - - - - - - (.)[a-z]* - - + + hadoop - - - - - - - $1ive - - + + + + + + + + (.)[a-z]* + - - - - org.apache.hadoop.hive.ql.udf.UDFRegExpReplace + + + + - - regexp_replace + + $1ive - - + + + + + + org.apache.hadoop.hive.ql.udf.UDFRegExpReplace + + regexp_replace + - - _col15 - - - - - - - - - - abbbb - - + + + + + + + _col15 + + + + + + + - - - - - - - bb - - + + abbbb - - - - - - - b - - + + + + + + + + bb + - - - - org.apache.hadoop.hive.ql.udf.UDFRegExpReplace + + + + - - regexp_replace + + b - - + + + + + + org.apache.hadoop.hive.ql.udf.UDFRegExpReplace + + regexp_replace + - - _col14 - - - - - - - - - - abc - - + + + + + + + _col14 + + + + + + + - - - - - - - z - - + + abc - - - - - - - a - - + + + + + + + + z + - - - - org.apache.hadoop.hive.ql.udf.UDFRegExpReplace + + + + - - regexp_replace + + a - - + + + + + + org.apache.hadoop.hive.ql.udf.UDFRegExpReplace + + regexp_replace + - - _col0 - - - - - - - - - - a - - + + + + + + + _col0 + + + + + + + - - - - - - - %a% - - + + a - - - - true + + + + - - org.apache.hadoop.hive.ql.udf.UDFLike + + %a% - - like - - - + + + + + + true + + org.apache.hadoop.hive.ql.udf.UDFLike + + + like + + + + - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + _col0 + + + _col1 + + + _col2 + + + _col3 + + + _col4 + + + _col5 + + + _col6 + + + _col7 + + + _col8 + + + _col9 + + + _col10 + + + _col11 + + + _col12 + + + _col13 + + + _col14 + + + _col15 + + + _col16 + + + + + + + SEL_2 + + + + + + + + + + + + + + _c0 - - + + _col0 - - + + - - + + boolean - - + + + + + + _c1 - - + + _col1 - - + + - - + + boolean - - + + + + + + _c2 - - + + _col2 - - + + - - + + boolean - - + + + + + + _c3 - - + + _col3 - - + + - - + + boolean - - - - - - - _col0 + + + + _c4 - - _col1 + + _col4 - - _col2 + + - - _col3 + + boolean - - _col4 + + + + + + _c5 - + _col5 - + + + + + boolean + + + + + + + _c6 + + _col6 - + + + + + boolean + + + + + + + _c7 + + _col7 - + + + + + boolean + + + + + + + _c8 + + _col8 - + + + + + boolean + + + + + + + _c9 + + _col9 - - _col10 + + - - _col11 + + boolean - - _col12 + + + + + + _c10 - - _col13 + + _col10 - - _col14 + + - - _col15 + + boolean - - _col16 - - - - - SEL_2 - - - - - - - - - - - - - - - _c0 - - - _col0 - - - - - - boolean - - + + + _c11 - - - - _c1 - - - _col1 - - - - - - boolean - - + + _col11 - - - - _c2 - - - _col2 - - - - - - boolean - - + + - - - - _c3 - - - _col3 - - - - - - boolean - - + + boolean - - - - _c4 - - - _col4 - - - - - - boolean - - + + + + + + _c12 - - - - _c5 - - - _col5 - - - - - - boolean - - + + _col12 - - - - _c6 - - - _col6 - - - - - - boolean - - + + - - - - _c7 - - - _col7 - - - - - - boolean - - + + boolean - - - - _c8 - - - _col8 - - - - - - boolean - - + + + + + + _c13 - - - - _c9 - - - _col9 - - - - - - boolean - - + + _col13 - - - - _c10 - - - _col10 - - - - - - boolean - - + + - - - - _c11 - - - _col11 - - - - - - boolean - - + + string - - - - _c12 - - - _col12 - - - - - - boolean - - + + + + + + _c14 - - - - _c13 - - - _col13 - - - 
- - - string - - + + _col14 - - - - _c14 - - - _col14 - - - - - - string - - + + - - - - _c15 - - - _col15 - - - - - - string - - + + string - - - - _c16 - - - _col16 - - - - - - string - - - - - - - - - - - - - - - - - - key + + + _c15 - - src + + _col15 - + + + string + - - - - - int - - + + + _c16 - - 86 + + _col16 + + + + + string + - - - - - - - - FIL_4 + + + + + + + + + + + key + + + src + + + + + + + + + + + + int + + + + + 86 + + + + + + + + + + + + + + + + + FIL_4 + + + + - - - - - - - - - - - - - - - key - - - src - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + src + + + + + + string + + + + + + + + + Index: ql/src/test/results/compiler/plan/udf4.q.xml =================================================================== --- ql/src/test/results/compiler/plan/udf4.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/udf4.q.xml (working copy) @@ -133,1452 +133,1444 @@ dest1 - - - - - - - - - - - #### A masked pattern was here #### + + + + + + + + + #### A masked pattern was here #### + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + org.apache.hadoop.mapred.TextInputFormat - - 1 + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - #### A masked pattern was here #### - - - true - - - - - org.apache.hadoop.mapred.TextInputFormat + + + + hive.serialization.extend.nesting.levels + true - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + columns + _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11,_col12,_col13,_col14,_col15,_col16,_col17,_col18 - - - - hive.serialization.extend.nesting.levels - true - - - columns - _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11,_col12,_col13,_col14,_col15,_col16,_col17,_col18 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - double:double:double:bigint:bigint:bigint:double:double:double:bigint:bigint:bigint:bigint:double:int:int:int:int:int - - - escape.delim - \ - - + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + serialization.format + 1 + + + columns.types + double:double:double:bigint:bigint:bigint:double:double:double:bigint:bigint:bigint:bigint:double:int:int:int:int:int + + + escape.delim + \ + - - 1 - - - FS_2 + + 1 - + + + + FS_2 + + + + + + + + + - - - - - - - - - - - - _col0 - - - - - - - - double - - - - - double - - + + + _col0 - - - - _col1 - - - - - - - - - double - - + + - - - - _col2 - - - - - - - + + double - - - - _col3 - - - - - - - - bigint - - - - - bigint - - + + double - - - - _col4 - - - - - - - - - bigint - - + + + + + + _col1 - - - - _col5 - - - - - - - - - bigint - - + + - - - - _col6 - - - - - - - - - double - - + + - - - - _col7 - - - - - - - - - double - - + + double - - - - _col8 - - - - - - - - - double - - + + + + + + _col2 - - - - _col9 - - - - - - - - - bigint - - + + - - - - _col10 - - - - - - - - - bigint - - + + - - - - _col11 - - - - - - - - - bigint - - + + double - - - - _col12 - - - - - - - - - bigint - - + + + + + + _col3 - - - - _col13 - - - - - - - - - double - - + + - - - - _col14 - - - - - - - - int - - - + + - int + bigint - - - - _col15 - - - - - - - - - int - - + + bigint - - - - _col16 - - - - - - - - - int - - - - - - - _col17 - - - - - - - - - int - - - - - - - _col18 - - - - - - - - - int - - - - - - - - - - - - - _col8 - - - - - - + + + _col4 - - 0.0 + + + + + + + bigint + - - - - - - org.apache.hadoop.hive.ql.udf.UDFSqrt - - - sqrt - - - - - - - - - - _col7 - - - 
- - - - - - - - - - 1.0 - - - - + + + _col5 - - + + - - + + + + bigint + - - - - - - org.apache.hadoop.hive.ql.udf.UDFSqrt - - - sqrt - - - - - - - - - - _col6 - - - - - + + + _col6 + + + + + - - 1.0 + + double - - - - - - org.apache.hadoop.hive.ql.udf.UDFSqrt - - - sqrt - - - - - - - - - - _col5 - - - - - - - - - - - - - 1.5 - - - - + + + _col7 - - + + - + + + double + - - - - - - - - - - - - _col4 - - - - - - + + + _col8 - - 1.5 + + - - - - - - - - - - - - - - _col3 - - - - - - + - - 1.0 + + double - - - - - - - - - - - - _col2 - - - - - - - - - - - - - 1.5 - - - - + + + _col9 - - + + - - + + + + bigint + - - - - - - - - - - - - _col1 - - - - - - + + + _col10 - - 1.5 + + - - - - - - - - - - - - - - _col9 - - - - - - - + + - - 1.0 + + bigint - - - - - - - - - - - - _col13 - - - - - - + + + _col11 - - 3 + + - - - - - - - - org.apache.hadoop.hive.ql.udf.UDFRand - - - rand - - - - - - - - - - _col12 - - - - - - - + + - - 1.0 + + bigint - - - - - - - - - - - - _col11 - - - - - - - - - - - - - 1.5 - - - - + + + _col12 - - + + - - + + + + bigint + - - - - - - - - - - - - _col10 - - - - - + + + _col13 + + + + + - - 1.5 + + double - - - - - - - - - - - - _col17 - - - - - - + + + _col14 - - 1 + + - - - - - - - - - - - - - 2 - - + + + + int - - + + int - - - - - - - - - - - - - - - _col16 - - - - - + + + _col15 + + + + + - - 1 + + int - - + + + _col16 + + + + + - - 2 + + int - - - - - - - - - - - - _col15 - - - - - + + + _col17 + + + + + - - 3 + + int - - - - - - - - - - - - _col14 - - - - - - 3 - - - - - _col0 - - - - - - + + + _col18 - - 1.0 + + - - - - - - - - - - - - - - _col18 - - - - - - + - - 1 + + int - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFOPBitNot - - - ~ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - _col2 - - - _col3 - - - _col4 - - - _col5 - - - _col6 - - - _col7 - - - _col8 - - - _col9 - - - _col10 - - - _col11 - - - _col12 - - - _col13 - - - _col14 - - - _col15 - - - _col16 - - - _col17 - - - _col18 - - - - + + + + + _col8 + + + + + + + + + + 0.0 + + + + + + + + + org.apache.hadoop.hive.ql.udf.UDFSqrt + + + sqrt + + + + + + + + + + _col7 + + + + + + + + + + + + + + 1.0 + + + + + + + + + + + + + + + + + + + org.apache.hadoop.hive.ql.udf.UDFSqrt + + + sqrt + + + + + + + + + + _col6 + + + + + + + + + + 1.0 + + + + + + + + + org.apache.hadoop.hive.ql.udf.UDFSqrt + + + sqrt + + + + + + + + + + _col5 + + + + + + + + + + + + + + 1.5 + + + + + + + + + + + + + + + + + + + + + + + + + _col4 + + + + + + + + + + 1.5 + + + + + + + + + + + + + + + _col3 + + + + + + + + + + 1.0 + + + + + + + + + + + + + + + _col2 + + + + + + + + + + + + + + 1.5 + + + + + + + + + + + + + + + + + + + + + + + + + _col1 + + + + + + + + + + 1.5 + + + + + + + + + + + + + + + _col9 + + + + + + + + + + 1.0 + + + + + + + + + + + + + + + _col13 + + + + + + + + + + 3 + + + + + + + + + org.apache.hadoop.hive.ql.udf.UDFRand + + + rand + + + + + + + + + + _col12 + + + + + + + + + + 1.0 + + + + + + + + + + + + + + + _col11 + + + + + + + + + + + + + + 1.5 + + + + + + + + + + + + + + + + + + + + + + + + + _col10 + + + + + + + + + + 1.5 + + + + + + + + + + + + + + + _col17 + + + + + + + + + + 1 + + + + + + + + + + + + + + 2 + + + + + + + + + + + + + + + + + + + + + + + + + _col16 + + + + + + + + + + 1 + + + + + + + + + + 2 + + + + + + + + + + + + + + + _col15 + + + + + + + + + + 3 + + + + + + + + + + + + + + + _col14 + + + + + + 3 + + + + + _col0 + + + + + + + + + + 1.0 + + + + + + + + + + + + + + + 
_col18 + + + + + + + + + + 1 + + + + + + + + + true + + + org.apache.hadoop.hive.ql.udf.UDFOPBitNot + + + ~ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + _col0 + + + _col1 + + + _col2 + + + _col3 + + + _col4 + + + _col5 + + + _col6 + + + _col7 + + + _col8 + + + _col9 + + + _col10 + + + _col11 + + + _col12 + + + _col13 + + + _col14 + + + _col15 + + + _col16 + + + _col17 + + + _col18 + + + + + + + SEL_1 + + + + - - SEL_1 - - - - - - - - - - - - - - - - _c0 - - - _col0 - - - - - - double - - - - - - - _c1 - - - _col1 - - - - - - double - - - - - - - _c2 - - - _col2 - - - - - - double - - - - - - - _c3 - - - _col3 - - - - - - bigint - - - - - - - _c4 - - - _col4 - - - - - - bigint - - - - - - - _c5 - - - _col5 - - - - - - bigint - - - - - - - _c6 - - - _col6 - - - - - - double - - - - - - - _c7 - - - _col7 - - - - - - double - - - - - - - _c8 - - - _col8 - - - - - - double - - - - - - - _c9 - - - _col9 - - - - - - bigint - - - - - - - _c10 - - - _col10 - - - - - - bigint - - - - - - - _c11 - - - _col11 - - - - - - bigint - - - - - - - _c12 - - - _col12 - - - - - - bigint - - - - - - - _c13 - - - _col13 - - - - - - double - - - - - - - _c14 - - - _col14 - - - - - - int - - - - - - - _c15 - - - _col15 - - - - - - int - - - - - - - _c16 - - - _col16 - - - - - - int - - - - - - - _c17 - - - _col17 - - - - - - int - - - - - - - _c18 - - - _col18 - - - - - - int - - - - - - - - - - + + + + + + + + + _c0 + + + _col0 + + + + + + double + + + + + + + _c1 + + + _col1 + + + + + + double + + + + + + + _c2 + + + _col2 + + + + + + double + + + + + + + _c3 + + + _col3 + + + + + + bigint + + + + + + + _c4 + + + _col4 + + + + + + bigint + + + + + + + _c5 + + + _col5 + + + + + + bigint + + + + + + + _c6 + + + _col6 + + + + + + double + + + + + + + _c7 + + + _col7 + + + + + + double + + + + + + + _c8 + + + _col8 + + + + + + double + + + + + + + _c9 + + + _col9 + + + + + + bigint + + + + + + + _c10 + + + _col10 + + + + + + bigint + + + + + + + _c11 + + + _col11 + + + + + + bigint + + + + + + + _c12 + + + _col12 + + + + + + bigint + + + + + + + _c13 + + + _col13 + + + + + + double + + + + + + + _c14 + + + _col14 + + + + + + int + + + + + + + _c15 + + + _col15 + + + + + + int + + + + + + + _c16 + + + _col16 + + + + + + int + + + + + + + _c17 + + + _col17 + + + + + + int + + + + + + + _c18 + + + _col18 + + + + + + int + + + + + + + + + Index: ql/src/test/results/compiler/plan/udf6.q.xml =================================================================== --- ql/src/test/results/compiler/plan/udf6.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/udf6.q.xml (working copy) @@ -153,297 +153,289 @@ src - - - - - - - - - - - #### A masked pattern was here #### + + + + + + + + + #### A masked pattern was here #### + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + org.apache.hadoop.mapred.TextInputFormat - - 1 + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - #### A masked pattern was here #### - - - true - - - - - org.apache.hadoop.mapred.TextInputFormat + + + + hive.serialization.extend.nesting.levels + true - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + columns + _col0,_col1 - - - - hive.serialization.extend.nesting.levels - true - - - columns - _col0,_col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - string:int - - - escape.delim - \ - - + + serialization.lib + 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - - 1 - - - - - FS_2 - - - - - - - - - - - - - - - - _col0 - - - - - - - - string - - - - - string - - + + serialization.format + 1 - - - - _col1 - - - - - - - - int - - - - - int - - + + columns.types + string:int + + escape.delim + \ + + + 1 + - - - - - - _col1 - - + + FS_2 + + + + + + + + + - - - + + + _col0 + + + + + + - boolean + string - - true + + string - - - + + + _col1 - - 1 + + - - - - - - + + + + int + + - - 2 + + int - - - - - - - - _col0 - - - - - - - - - - a - - - - - - - - - - b - - - - - - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - + + + + + _col1 + + + + + + + + + boolean + + + + + true + + + + + + + + + + 1 + + + + + + + + + + 2 + + + + + + + + + + + + + + + _col0 + + + + + + + + + + a + + + + + + + + + + b + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + _col0 + + + _col1 + + + + + + + SEL_1 + + + + - - SEL_1 - - - - - - - - - - - - - - - - _c0 - - - _col0 - - - - - - string - - - - - - - _c1 - - - _col1 - - - - - - int - - - - - - - - - - + + + + + + + + + _c0 + + + _col0 + + + + + + string + + + + + + + _c1 + + + _col1 + + + + + + int + + + + + + + + + Index: ql/src/test/results/compiler/plan/udf_case.q.xml =================================================================== --- ql/src/test/results/compiler/plan/udf_case.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/udf_case.q.xml (working copy) @@ -153,381 +153,369 @@ src - - - - - - - - - + + + + + + + + + + + + #### A masked pattern was here #### + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + org.apache.hadoop.mapred.TextInputFormat + + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + + + hive.serialization.extend.nesting.levels + true + + + columns + _col0,_col1 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + + serialization.format + 1 + + + columns.types + int:int + + + escape.delim + \ + + + + + + + 1 + + + + + FS_3 + + - - - - - #### A masked pattern was here #### - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - hive.serialization.extend.nesting.levels - true - - - columns - _col0,_col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - int:int - - - escape.delim - \ - - - - - - - 1 - - - - - FS_3 - - - - - - - - - - - - - - - - _col0 - - - - - - - - int - - - - - int - - - - - - - _col1 - - - - - - - - - int - - - - - - - - + - - - - - - 1 - - - - - LIM_2 - - - - - - - - - - - - - - - - _c0 - - - _col0 - - - - - - int - - - - - - - _c1 - - - _col1 - - - - - - int - - - - - - - - + + + + + + + + + _col0 + + + + + + + + int + + + + + int + + + + + + + _col1 + + + + + + + + + int + + + + + + + + + - - - - - - _col1 - - - - - - - - - - 11 - - - - - - - - - - 12 - - - - - - - - - - 13 - - - - - - - - - - 14 - - - - - - - - - - 15 - - - - + + + + 1 - - - - - - - - _col0 - - - + + LIM_2 + + + + + + + + + + - - - + + + _c0 - - 1 + + _col0 - - - - - + - - 1 + + int - - - + + + _c1 - - 2 + + _col1 - - - - - + - - 3 + + int - - - - - - - 4 - - - - - - - - - - 5 - - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - + + + + + _col1 + + + + + + + + + + 11 + + + + + + + + + + 12 + + + + + + + + + + 13 + + + + + + + + + + 14 + + + + + + + + + + 15 + + + + + + + + + + + + + + + _col0 + + + + + + + + + + 1 + 
+ + + + + + + + + 1 + + + + + + + + + + 2 + + + + + + + + + + 3 + + + + + + + + + + 4 + + + + + + + + + + 5 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + _col0 + + + _col1 + + + + + + + SEL_1 + + + + - - SEL_1 - - - - - - - - - - - - - - - - - - + + + + + + + + + + Index: ql/src/test/results/compiler/plan/udf_when.q.xml =================================================================== --- ql/src/test/results/compiler/plan/udf_when.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/udf_when.q.xml (working copy) @@ -153,461 +153,449 @@ src - - - - - - - - - + + + + + + + + + + + + #### A masked pattern was here #### + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + org.apache.hadoop.mapred.TextInputFormat + + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + + + hive.serialization.extend.nesting.levels + true + + + columns + _col0,_col1 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + + serialization.format + 1 + + + columns.types + int:int + + + escape.delim + \ + + + + + + + 1 + + + + + FS_3 + + - - - - - #### A masked pattern was here #### - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - hive.serialization.extend.nesting.levels - true - - - columns - _col0,_col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - int:int - - - escape.delim - \ - - - - - - - 1 - - - - - FS_3 - - - - - - - - - - - - - - - - _col0 - - - - - - - - int - - - - - int - - - - - - - _col1 - - - - - - - - - int - - - - - - - - + - - - - - - 1 - - - - - LIM_2 - - - - - - - - - - - - - - - - _c0 - - - _col0 - - - - - - int - - - - - - - _c1 - - - _col1 - - - - - - int - - - - - - - - + + + + + + + + + _col0 + + + + + + + + int + + + + + int + + + + + + + _col1 + + + + + + + + + int + + + + + + + + + - - - - - - _col1 - - - - - - - - - - - - - - 12 - - - - - - - - - - 11 - - - - - - - - - - - - boolean - - - - - - - - - - - - 13 - - - - - - - - - - - - - - 14 - - - - - - - - - - 10 - - - - - - - - - - - - - - - - - - - - 15 - - - - + + + + 1 - - - - - - - - _col0 - - - + + LIM_2 + + + + + + + + + + - - - - - - - - - - 1 - - - - - - - - - - 1 - - - - + + + _c0 - - + + _col0 - - - - - - - - + - - 2 + + int - - - - - - - - - - 3 - - - - - - - - - - 5 - - - - + + + _c1 - - + + _col1 - - - - - - - - + - - 4 + + int - - - - - - - 5 - - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - + + + + + _col1 + + + + + + + + + + + + + + 12 + + + + + + + + + + 11 + + + + + + + + + + + + boolean + + + + + + + + + + + + 13 + + + + + + + + + + + + + + 14 + + + + + + + + + + 10 + + + + + + + + + + + + + + + + + + + + 15 + + + + + + + + + + + + + + + _col0 + + + + + + + + + + + + + + 1 + + + + + + + + + + 1 + + + + + + + + + + + + + + + + + + + + 2 + + + + + + + + + + + + + + 3 + + + + + + + + + + 5 + + + + + + + + + + + + + + + + + + + + 4 + + + + + + + + + + 5 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + _col0 + + + _col1 + + + + + + + SEL_1 + + + + - - SEL_1 - - - - - - - - - - - - - - - - - - + + + + + + + + + + Index: ql/src/test/results/compiler/plan/union.q.xml =================================================================== --- ql/src/test/results/compiler/plan/union.q.xml (revision 1553449) +++ ql/src/test/results/compiler/plan/union.q.xml (working copy) @@ -44,109 +44,105 @@ 
#### A masked pattern was here #### - - - - - - - #### A masked pattern was here #### - - - 1 - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - columns - _col0,_col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - string:string - - - - - - - 1 - - + + + + + + #### A masked pattern was here #### + + + 1 + + + + + org.apache.hadoop.mapred.TextInputFormat + + + org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + + + + + columns + _col0,_col1 + + + serialization.lib + org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + + serialization.format + 1 + + + columns.types + string:string + + + + + + + 1 + + + + + FS_12 + + + + - - FS_12 - - - - - - - - - - - - - - - - _col0 - - - - - - - - string - - - - - string - - - - - - - _col1 - - - - - - - - - string - - - - - - - - - - + + + + + + + + + _col0 + + + + + + + + string + + + + + string + + + + + + + _col1 + + + + + + + + + string + + + + + + + + + @@ -667,471 +663,431 @@ null-subquery1:unioninput-subquery1:src - - - - - - - - - + + + + + + + + + - + - - - - - - - - - - - 1 - - - #### A masked pattern was here #### - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_8 - - - - - - - - - - - - - - + + + + + + 1 + + + #### A masked pattern was here #### + + + 1 + + + #### A masked pattern was here #### + + + true + + + + + + 1 + + + + + FS_8 + + + + - - - - _col1 - - - _col1 - - - src - - - - - + + + + + + + + + + + _col1 + + + _col1 + + + src + + + + + + + + _col0 + + + _col0 + + + src + + + + + + + + + + + + + + + + + + + + + + + + _col0 + + + _col1 + + + + + true + + + + + SEL_7 + + + + + + + + + + + + + + key - + _col0 - - - _col0 - - - src - - - - - + + src + + + + + + string + - - - - - - - - - - - + + + + value - - - - _col0 - - - _col1 - - + + _col1 - - true + + src + + + + + string + - - SEL_7 + + + + + + + + + + + + UNION_6 + + + + + + + + + + + + + + + + _col1 + + + value - - - - - - + + src - - - - - - - - key - - - _col0 - - - src - - - - - - string - - - - - - - value - - - _col1 - - - src - - - - - - string - - - - - - + + + + _col0 + + + key + + + src + + + + + + - + + + + + + + + + + + + + + + _col0 + + + _col1 + + + + + true + + - UNION_6 + SEL_5 - - - - - - - - - - - - + + + + + - - - - _col1 - - - value - - - src - - - - - + + + + + + + + + + + key + + + src + + + + + + + + + + + + int + + + + + 100 + + + + + + + + + + + + boolean + + + + + + + + + FIL_10 + + + + + + + + - - _col0 - - - key - - + + + src - - + + - - - - - + - + 0 - + 1 - + - _col0 + key - _col1 + value - - true + + TS_3 - - - - SEL_5 - - - - - - + + + - + + + key + + + src + + + + + + string + + - - - - - - - - - - - - key - - - src - - - - - - - - - - - - int - - - - - 100 - - - - + + + + value - - + + src - - - - boolean - - + + + + string + - - - - FIL_10 - - - - - - - - - - + + + true - - - - src - - - - - + + BLOCK__OFFSET__INSIDE__FILE - - - - 0 - - - 1 - - + + src - - - - key + + + + bigint - - value - - - TS_3 + + bigint - - - - - - - - key - - - src - - - - - - string - - - - - - - value - - - src - - - - - - string - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src - - - - - bigint - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src - - - - - - string - - - - - - - - - - - - - - - + + + + true - - + + INPUT__FILE__NAME + + src + + + + + + string + @@ -1140,49 +1096,23 @@ - - - - - - - - _col0 - - - src - - - - - - string - 
- - - - - - _col1 - - - src - - - - - - string - - - - - - - - - - + + + + + + + + + + + + + + + + + @@ -1226,212 +1156,248 @@ - - - - - - _col1 - - - value - - - src - - - - - + + + + + + + + + _col0 + + + src + + + + + + string + + + + + + + _col1 + + + src + + + + + + string + + + + + + + + + + + + + + _col1 + + + value - - _col0 - - - key - - - src - - - - - + + src + + + - - - - - - - - - - - + + _col0 + + + key - - - - _col0 - - - _col1 - - + + src + + + - - SEL_2 - - + + + + + - + + + + - - - - - - - - _col0 - - - src - - - - - - string - - - - - - - _col1 - - - src - - - - - - string - - - - + + + + _col0 + + _col1 + - - - - - - - + + SEL_2 + + + + + + + + + - - - key + + + _col0 src - + + + string + - - - + + + _col1 - - 100 + + src + + + + + string + - - - - - - - - FIL_9 + + + + + + + + + + + key + + + src + + + + + + + + + + + + + 100 + + + + + + + + + + + + + + + + + FIL_9 + + + + - - - - - - - - - - - - - - - key - - - src - - - - - - string - - - - - - - value - - - src - - - - - - string - - - - - - - - - - + + + + + + + + + key + + + src + + + + + + string + + + + + + + value + + + src + + + + + + string + + + + + + + + + Index: ql/src/test/templates/TestCliDriver.vm =================================================================== --- ql/src/test/templates/TestCliDriver.vm (revision 1553449) +++ ql/src/test/templates/TestCliDriver.vm (working copy) @@ -25,6 +25,7 @@ import java.util.*; import org.apache.hadoop.hive.ql.QTestUtil; +import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType; import org.apache.hadoop.hive.ql.session.SessionState; public class $className extends TestCase { @@ -33,10 +34,12 @@ private static QTestUtil qt; static { + + MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode"); + String hiveConfDir = "$hiveConfDir"; try { - boolean miniMR = "$clusterMode".equals("miniMR"); String hadoopVer = "$hadoopVersion"; - qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, hadoopVer); + qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, hiveConfDir, hadoopVer); // do a one time initialization qt.cleanUp(); @@ -46,7 +49,7 @@ System.err.println("Exception: " + e.getMessage()); e.printStackTrace(); System.err.flush(); - fail("Unexpected exception in static initialization"); + fail("Unexpected exception in static initialization"+e.getMessage()); } } Index: ql/src/test/templates/TestNegativeCliDriver.vm =================================================================== --- ql/src/test/templates/TestNegativeCliDriver.vm (revision 1553449) +++ ql/src/test/templates/TestNegativeCliDriver.vm (working copy) @@ -25,6 +25,7 @@ import java.util.*; import org.apache.hadoop.hive.ql.QTestUtil; +import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType; import org.apache.hadoop.hive.ql.exec.Task; public class $className extends TestCase { @@ -33,8 +34,9 @@ private static QTestUtil qt; static { + MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode"); + try { - boolean miniMR = "$clusterMode".equals("miniMR"); String hadoopVer = "$hadoopVersion"; qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, hadoopVer); // do a one time initialization Index: ql/src/test/templates/TestParse.vm =================================================================== --- ql/src/test/templates/TestParse.vm (revision 1553449) +++ ql/src/test/templates/TestParse.vm (working copy) @@ -25,6 +25,7 @@ import java.util.*; import org.apache.hadoop.hive.ql.QTestUtil; +import 
Index: ql/src/test/templates/TestParse.vm
===================================================================
--- ql/src/test/templates/TestParse.vm	(revision 1553449)
+++ ql/src/test/templates/TestParse.vm	(working copy)
@@ -25,6 +25,7 @@
 import java.util.*;
 
 import org.apache.hadoop.hive.ql.QTestUtil;
+import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
 import org.apache.hadoop.hive.ql.exec.Task;
 
 public class $className extends TestCase {
@@ -31,10 +32,11 @@
   private static final String HIVE_ROOT = QTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root"));
 
   private static QTestUtil qt;
+
+  static {
+    MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
 
-  static {
     try {
-      boolean miniMR = "$clusterMode".equals("miniMR");
       String hadoopVer = "$hadoopVersion";
       qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, hadoopVer);
       qt.init(null);
Index: ql/src/test/templates/TestParseNegative.vm
===================================================================
--- ql/src/test/templates/TestParseNegative.vm	(revision 1553449)
+++ ql/src/test/templates/TestParseNegative.vm	(working copy)
@@ -25,6 +25,7 @@
 import java.util.*;
 
 import org.apache.hadoop.hive.ql.QTestUtil;
+import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
 import org.apache.hadoop.hive.ql.exec.Task;
 
 public class $className extends TestCase {
@@ -33,8 +34,10 @@
   private static QTestUtil qt;
 
   static {
+
+    MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
+
     try {
-      boolean miniMR = "$clusterMode".equals("miniMR");
       String hadoopVer = "$hadoopVersion";
       qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, hadoopVer);
     } catch (Exception e) {
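Note that only TestCliDriver.vm passes the new hiveConfDir argument; the parse and negative drivers keep the four-argument QTestUtil call. QTestUtil itself is not shown in this section, so the following self-contained sketch is meant only to illustrate how the two call shapes could relate; the class name, field handling and the empty-string default are assumptions, not the real implementation:

// Illustrative stand-in, not the real org.apache.hadoop.hive.ql.QTestUtil.
public class QTestUtilSketch {
  private final String outDir;
  private final String logDir;
  private final MiniClusterType clusterType;
  private final String confDir;   // directory holding an alternate hive-site.xml; may be empty
  private final String hadoopVer;

  // Four-argument form used by the parse/negative drivers: no custom conf dir.
  public QTestUtilSketch(String outDir, String logDir, MiniClusterType clusterType,
      String hadoopVer) {
    this(outDir, logDir, clusterType, "", hadoopVer);
  }

  // Five-argument form used by TestCliDriver.vm: threads $hiveConfDir through.
  public QTestUtilSketch(String outDir, String logDir, MiniClusterType clusterType,
      String confDir, String hadoopVer) {
    this.outDir = outDir;
    this.logDir = logDir;
    this.clusterType = clusterType;
    this.confDir = confDir;
    this.hadoopVer = hadoopVer;
  }
}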
Index: shims/0.23/pom.xml
===================================================================
--- shims/0.23/pom.xml	(revision 1553449)
+++ shims/0.23/pom.xml	(working copy)
@@ -87,6 +87,81 @@
       <groupId>org.mortbay.jetty</groupId>
       <artifactId>jetty</artifactId>
       <version>${jetty.version}</version>
+      <optional>true</optional>
     </dependency>
+    <dependency>
+      <groupId>org.apache.tez</groupId>
+      <artifactId>tez-api</artifactId>
+      <version>${tez.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.tez</groupId>
+      <artifactId>tez-dag</artifactId>
+      <version>${tez.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.tez</groupId>
+      <artifactId>tez-common</artifactId>
+      <version>${tez.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.tez</groupId>
+      <artifactId>tez-runtime-library</artifactId>
+      <version>${tez.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.tez</groupId>
+      <artifactId>tez-mapreduce</artifactId>
+      <version>${tez.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.tez</groupId>
+      <artifactId>tez-runtime-internals</artifactId>
+      <version>${tez.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <version>${hadoop-23.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-api</artifactId>
+      <version>${hadoop-23.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-common</artifactId>
+      <version>${hadoop-23.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-client</artifactId>
+      <version>${hadoop-23.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.tez</groupId>
+      <artifactId>tez-tests</artifactId>
+      <version>${tez.version}</version>
+      <optional>true</optional>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-tests</artifactId>
+      <version>${hadoop-23.version}</version>
+      <optional>true</optional>
+      <type>test-jar</type>
+    </dependency>
Index: shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
===================================================================
--- shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java	(revision 1553449)
+++ shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java	(working copy)
@@ -59,13 +59,15 @@
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.mapred.lib.TotalOrderPartitioner;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.tez.test.MiniTezCluster;
 
-
 /**
  * Implemention of shims against Hadoop 0.23.0.
  */
 public class Hadoop23Shims extends HadoopShimsSecure {
 
+  HadoopShims.MiniDFSShim cluster = null;
+
   @Override
   public String getTaskAttemptLogUrl(JobConf conf,
     String taskTrackerHttpAddress, String taskAttemptId)
@@ -182,7 +184,12 @@
     private final MiniMRCluster mr;
     private final Configuration conf;
 
-    public MiniMrShim(Configuration conf, int numberOfTaskTrackers,
+    public MiniMrShim() {
+      mr = null;
+      conf = null;
+    }
+
+    public MiniMrShim(Configuration conf, int numberOfTaskTrackers,
         String nameNode, int numDir) throws IOException {
 
       this.conf = conf;
@@ -219,6 +226,73 @@
     }
   }
 
+  /**
+   * Returns a shim to wrap MiniMrTez
+   */
+  public MiniMrShim getMiniTezCluster(Configuration conf, int numberOfTaskTrackers,
+      String nameNode, int numDir) throws IOException {
+    return new MiniTezShim(conf, numberOfTaskTrackers, nameNode, numDir);
+  }
+
+  /**
+   * Shim for MiniTezCluster
+   */
+  public class MiniTezShim extends Hadoop23Shims.MiniMrShim {
+
+    private final MiniTezCluster mr;
+    private final Configuration conf;
+
+    public MiniTezShim(Configuration conf, int numberOfTaskTrackers,
+        String nameNode, int numDir) throws IOException {
+
+      mr = new MiniTezCluster("hive", numberOfTaskTrackers);
+      conf.set("fs.defaultFS", nameNode);
+      conf.set(MRJobConfig.MR_AM_STAGING_DIR, "/apps_staging_dir");
+      mr.init(conf);
+      mr.start();
+      this.conf = mr.getConfig();
+    }
+
+    @Override
+    public int getJobTrackerPort() throws UnsupportedOperationException {
+      String address = conf.get("yarn.resourcemanager.address");
+      address = StringUtils.substringAfterLast(address, ":");
+
+      if (StringUtils.isBlank(address)) {
+        throw new IllegalArgumentException("Invalid YARN resource manager port.");
+      }
+
+      return Integer.parseInt(address);
+    }
+
+    @Override
+    public void shutdown() throws IOException {
+      mr.stop();
+    }
+
+    @Override
+    public void setupConfiguration(Configuration conf) {
+      Configuration config = mr.getConfig();
+      for (Map.Entry<String, String> pair: config) {
+        conf.set(pair.getKey(), pair.getValue());
+      }
+
+      Path jarPath = new Path("hdfs:///user/hive");
+      Path hdfsPath = new Path("hdfs:///user/");
+      try {
+        FileSystem fs = cluster.getFileSystem();
+        jarPath = fs.makeQualified(jarPath);
+        conf.set("hive.jar.directory", jarPath.toString());
+        fs.mkdirs(jarPath);
+        hdfsPath = fs.makeQualified(hdfsPath);
+        conf.set("hive.user.install.directory", hdfsPath.toString());
+        fs.mkdirs(hdfsPath);
+      } catch (Exception e) {
+        e.printStackTrace();
+      }
+    }
+  }
+
   // Don't move this code to the parent class. There's a binary
   // incompatibility between hadoop 1 and 2 wrt MiniDFSCluster and we
   // need to have two different shim classes even though they are
@@ -227,7 +301,8 @@
       int numDataNodes,
       boolean format,
       String[] racks) throws IOException {
-    return new MiniDFSShim(new MiniDFSCluster(conf, numDataNodes, format, racks));
+    cluster = new MiniDFSShim(new MiniDFSCluster(conf, numDataNodes, format, racks));
+    return cluster;
   }
 
   /**
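To make the new shim entry points easier to follow, here is a hypothetical caller-side sketch of roughly what a test harness could do with them. It assumes the HadoopShims interface exposes getMiniDfs and getMiniTezCluster returning its MiniDFSShim and MiniMrShim nested types, and that MiniMrShim declares setupConfiguration and shutdown; it is illustrative, not part of the patch:

// Hypothetical usage sketch; method names are taken from the diff above and
// from the existing HadoopShims/ShimLoader API, but the interface wiring is assumed.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.ShimLoader;

public class MiniTezClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    HadoopShims shims = ShimLoader.getHadoopShims();

    // Bring up DFS first; the Tez shim's setupConfiguration() qualifies the
    // hive.jar.directory / hive.user.install.directory paths against it.
    HadoopShims.MiniDFSShim dfs = shims.getMiniDfs(conf, 4, true, null);

    // Start the MiniTezCluster and fold its generated configuration back
    // into the job configuration.
    HadoopShims.MiniMrShim tez = shims.getMiniTezCluster(
        conf, 4, dfs.getFileSystem().getUri().toString(), 1);
    tez.setupConfiguration(conf);

    // ... run queries against the Tez-backed mini cluster ...

    tez.shutdown();
    dfs.shutdown();
  }
}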