Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
===================================================================
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java	(revision 1034599)
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java	(working copy)
@@ -231,6 +231,8 @@
     // mapper/reducer memory in local mode
     HIVEHADOOPMAXMEM("hive.mapred.local.mem", 0),
 
+    //small table file size
+    HIVESMALLTABLESFILESIZE("hive.smalltable.filesize",25000000L), //25M
     // test mode in hive mode
     HIVETESTMODE("hive.test.mode", false),
     HIVETESTMODEPREFIX("hive.test.mode.prefix", "test_"),
@@ -243,6 +245,7 @@
     HIVEMERGEMAPFILESAVGSIZE("hive.merge.smallfiles.avgsize", (long) (16 * 1000 * 1000)),
 
     HIVESKEWJOIN("hive.optimize.skewjoin", false),
+    HIVECONVERTJOIN("hive.auto.convert.join", false),
     HIVESKEWJOINKEY("hive.skewjoin.key", 1000000),
     HIVESKEWJOINMAPJOINNUMMAPTASK("hive.skewjoin.mapjoin.map.tasks", 10000),
     HIVESKEWJOINMAPJOINMINSPLIT("hive.skewjoin.mapjoin.min.split", 33554432), //32M
@@ -251,7 +254,13 @@
 
     HIVESENDHEARTBEAT("hive.heartbeat.interval", 1000),
     HIVEMAXMAPJOINSIZE("hive.mapjoin.maxsize", 100000),
+    HIVEHASHTABLETHRESHOLD("hive.hashtable.threshold", 100000),
+    HIVEHASHTABLELOADFACTOR("hive.hashtable.loadfactor", (float) 0.75),
+    HIVEHASHTABLEMAXMEMORYUSAGE("hive.hashtable.max.memory.usage", (float) 0.90),
+    HIVEHASHTABLESCALE("hive.hashtable.scale", (long)100000),
+    HIVEDEBUGLOCALTASK("hive.debug.localtask",false),
+
     HIVEJOBPROGRESS("hive.task.progress", false),
 
     HIVEINPUTFORMAT("hive.input.format", ""),
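
[Editorial aside, not part of the patch: the hunk above adds the switch and sizing knobs for automatic map-join conversion. A minimal HiveQL sketch of how these properties could be exercised from a client session follows; the property names and values are taken verbatim from the hunk, while the comments are hedged paraphrases of what the knobs appear to control.]

set hive.auto.convert.join=true;          -- turn on automatic conversion of a common join into a map join
set hive.smalltable.filesize=25000000;    -- a join input below this size (bytes, default 25M) counts as "small"
-- sizing and memory limits for the in-memory hash table built from the small table:
set hive.hashtable.threshold=100000;
set hive.hashtable.loadfactor=0.75;
set hive.hashtable.max.memory.usage=0.90;
set hive.hashtable.scale=100000;

The auto_join*.q.out golden files that follow record the query results used to validate this behavior.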
Index: serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java	(revision 1034599)
+++ serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java	(working copy)
@@ -187,6 +187,7 @@
     return copyToStandardObject(o, oi, ObjectInspectorCopyOption.DEFAULT);
   }
 
+
   public static Object copyToStandardObject(Object o, ObjectInspector oi,
       ObjectInspectorCopyOption objectInspectorOption) {
     if (o == null) {
Index: ql/src/test/results/clientpositive/auto_join3.q.out
===================================================================
--- ql/src/test/results/clientpositive/auto_join3.q.out	(revision 0)
+++ ql/src/test/results/clientpositive/auto_join3.q.out	(revision 0)
@@ -0,0 +1,28 @@
+PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@dest1
+PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key)
+INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1
+POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key)
+INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1
+POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.value SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT sum(hash(dest1.key,dest1.value)) FROM dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-59-30_884_3252767069479385264/-mr-10000
+POSTHOOK: query: SELECT sum(hash(dest1.key,dest1.value)) FROM dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-59-30_884_3252767069479385264/-mr-10000
+POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.value SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ]
+344360994461
Index: ql/src/test/results/clientpositive/auto_join13.q.out
===================================================================
--- ql/src/test/results/clientpositive/auto_join13.q.out	(revision 0)
+++ ql/src/test/results/clientpositive/auto_join13.q.out	(revision 0)
@@ -0,0 +1,25 @@
+PREHOOK: query: SELECT sum(hash(src1.c1, src2.c4))
+FROM
+(SELECT src.key as c1, src.value as c2 from src) src1
+JOIN
+(SELECT src.key as c3, src.value as c4 from src) src2
+ON src1.c1 = src2.c3 AND src1.c1 < 100
+JOIN
+(SELECT src.key as c5, src.value as c6 from src) src3
+ON src1.c1 + src2.c3 = src3.c5 AND src3.c5 < 200
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-57-23_066_8932337514085876743/-mr-10000
+POSTHOOK: query: SELECT sum(hash(src1.c1, src2.c4))
+FROM
+(SELECT src.key as c1, src.value as c2 from src) src1
+JOIN
+(SELECT src.key as c3, src.value as c4 from src) src2
+ON src1.c1 = src2.c3 AND src1.c1 < 100
+JOIN
+(SELECT src.key as c5, src.value as c6 from src) src3
+ON src1.c1 + src2.c3 = src3.c5 AND src3.c5 < 200
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-57-23_066_8932337514085876743/-mr-10000
+-97670109576
Index: ql/src/test/results/clientpositive/auto_join_filters.q.out
===================================================================
--- ql/src/test/results/clientpositive/auto_join_filters.q.out	(revision 0)
+++ ql/src/test/results/clientpositive/auto_join_filters.q.out	(revision 0)
@@ -0,0 +1,508 @@
+PREHOOK: query: CREATE TABLE myinput1(key int, value int)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE myinput1(key int, value int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@myinput1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/in3.txt' INTO TABLE myinput1
+PREHOOK: type: LOAD
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/in3.txt' INTO TABLE myinput1
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@myinput1
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-54-58_117_4655002105750219/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-54-58_117_4655002105750219/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-02_366_6992699161975993053/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-02_366_6992699161975993053/-mr-10000
+4937935
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-06_634_6445437890696840782/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-06_634_6445437890696840782/-mr-10000
+3080335
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-11_343_4548102731373306036/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-11_343_4548102731373306036/-mr-10000
+19749880
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-16_548_5347533565842262214/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-16_548_5347533565842262214/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-20_995_7002425294061259326/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-20_995_7002425294061259326/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-25_522_7223107408677755461/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-25_522_7223107408677755461/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-29_880_4549706398893829025/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-29_880_4549706398893829025/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-34_424_169089929568714471/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-34_424_169089929568714471/-mr-10000
+4937935
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-38_712_4712108057879519702/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-38_712_4712108057879519702/-mr-10000
+4937935
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-44_225_1720576234903554068/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-44_225_1720576234903554068/-mr-10000
+4937935
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-49_018_1771521729720905137/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-49_018_1771521729720905137/-mr-10000
+4937935
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-53_798_4683803195318455359/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-53_798_4683803195318455359/-mr-10000
+3080335
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-58_327_8236566722650852723/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-55-58_327_8236566722650852723/-mr-10000
+3080335
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-03_065_2996745104714988105/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-03_065_2996745104714988105/-mr-10000
+3080335
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-07_973_4625804318414132050/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-07_973_4625804318414132050/-mr-10000
+3080335
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-12_356_8298086109390383704/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-12_356_8298086109390383704/-mr-10000
+4939870
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-17_229_5664599811337231023/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-17_229_5664599811337231023/-mr-10000
+4939870
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-21_523_7285750866206869237/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-21_523_7285750866206869237/-mr-10000
+4939870
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-26_625_2655349310089115439/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-26_625_2655349310089115439/-mr-10000
+4939870
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-32_045_6195138915928486369/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-32_045_6195138915928486369/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-36_424_3499869040206387595/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-36_424_3499869040206387595/-mr-10000
+3080335
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-40_774_9079581832077075239/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-40_774_9079581832077075239/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-47_855_6954543840613097127/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-47_855_6954543840613097127/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-54_495_8347295964623851135/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-56-54_495_8347295964623851135/-mr-10000
+3080335
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-02_103_2472217444100405726/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-02_103_2472217444100405726/-mr-10000
+3078400
+PREHOOK: query: CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@smb_input1
+PREHOOK: query: CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@smb_input2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/in1.txt' into table smb_input1
+PREHOOK: type: LOAD
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/in1.txt' into table smb_input1
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@smb_input1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/in2.txt' into table smb_input1
+PREHOOK: type: LOAD
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/in2.txt' into table smb_input1
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@smb_input1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/in1.txt' into table smb_input2
+PREHOOK: type: LOAD
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/in1.txt' into table smb_input2
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@smb_input2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/in2.txt' into table smb_input2
+PREHOOK: type: LOAD
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/in2.txt' into table smb_input2
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@smb_input2
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-14_989_3018079175573168618/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-14_989_3018079175573168618/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-20_236_1394320099184495685/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-20_236_1394320099184495685/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-24_621_5150978026396273270/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-24_621_5150978026396273270/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-28_867_297793229671774013/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-28_867_297793229671774013/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-33_384_4213543405474846039/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-33_384_4213543405474846039/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-39_211_3398396582126586587/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-39_211_3398396582126586587/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-43_619_5340976824563811251/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-43_619_5340976824563811251/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-48_456_7729606752684080265/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-48_456_7729606752684080265/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-53_321_4161401378864306240/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-53_321_4161401378864306240/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-58_185_6149588741729351812/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-57-58_185_6149588741729351812/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-03_957_1701501074902739227/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-03_957_1701501074902739227/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-08_779_8730807341859621811/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-08_779_8730807341859621811/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-14_365_6978393466194392157/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-14_365_6978393466194392157/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-19_195_6596525098045181032/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-19_195_6596525098045181032/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-23_944_767168653859149753/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-23_944_767168653859149753/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-28_382_1684228097795355116/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-28_382_1684228097795355116/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-33_122_626333363788741642/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-33_122_626333363788741642/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-37_662_749514132507506514/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-37_662_749514132507506514/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-43_069_1206846342850380139/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-43_069_1206846342850380139/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-47_842_61082551066774054/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-47_842_61082551066774054/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-52_552_2495832598945335366/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-52_552_2495832598945335366/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-57_562_5897053800333413408/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-58-57_562_5897053800333413408/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-59-02_213_6341773528378956115/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-59-02_213_6341773528378956115/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-59-10_007_6693537617189622123/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-59-10_007_6693537617189622123/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-59-18_342_340633536253451649/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-59-18_342_340633536253451649/-mr-10000
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-59-26_006_4411101160964366136/-mr-10000
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-59-26_006_4411101160964366136/-mr-10000
+3078400
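
[Editorial aside, not part of the patch: the golden files record only query results, never plans. As a sanity check outside the test harness, one could ask Hive for the plan directly. This is an illustrative sketch reusing the myinput1 table from auto_join_filters.q above; the exact operator tree printed varies by version.]

set hive.auto.convert.join=true;
-- With conversion on, the plan for a qualifying join is expected to contain a
-- map-join operator fed by a local hash-table build, rather than a plain
-- reduce-side join operator.
EXPLAIN
SELECT sum(hash(a.key,a.value,b.key,b.value))
FROM myinput1 a JOIN myinput1 b ON a.key = b.key;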
Index: ql/src/test/results/clientpositive/auto_join22.q.out
===================================================================
--- ql/src/test/results/clientpositive/auto_join22.q.out	(revision 0)
+++ ql/src/test/results/clientpositive/auto_join22.q.out	(revision 0)
@@ -0,0 +1,9 @@
+PREHOOK: query: SELECT sum(hash(src5.src1_value)) FROM (SELECT src3.*, src4.value as src4_value, src4.key as src4_key FROM src src4 JOIN (SELECT src2.*, src1.key as src1_key, src1.value as src1_value FROM src src1 JOIN src src2 ON src1.key = src2.key) src3 ON src3.src1_key = src4.key) src5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-58-58_110_8402656982117607045/-mr-10000
+POSTHOOK: query: SELECT sum(hash(src5.src1_value)) FROM (SELECT src3.*, src4.value as src4_value, src4.key as src4_key FROM src src4 JOIN (SELECT src2.*, src1.key as src1_key, src1.value as src1_value FROM src src1 JOIN src src2 ON src1.key = src2.key) src3 ON src3.src1_key = src4.key) src5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-58-58_110_8402656982117607045/-mr-10000
+344337359100
Index: ql/src/test/results/clientpositive/auto_join9.q.out
===================================================================
--- ql/src/test/results/clientpositive/auto_join9.q.out	(revision 0)
+++ ql/src/test/results/clientpositive/auto_join9.q.out	(revision 0)
@@ -0,0 +1,30 @@
+PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@dest1
+PREHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Output: default@dest1
+POSTHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@dest1
+POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT sum(hash(dest1.key,dest1.value)) FROM dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_14-00-19_600_8365200661139778951/-mr-10000
+POSTHOOK: query: SELECT sum(hash(dest1.key,dest1.value)) FROM dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_14-00-19_600_8365200661139778951/-mr-10000
+POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
+101861029915
Index: ql/src/test/results/clientpositive/auto_join19.q.out
===================================================================
--- ql/src/test/results/clientpositive/auto_join19.q.out	(revision 0)
+++ ql/src/test/results/clientpositive/auto_join19.q.out	(revision 0)
@@ -0,0 +1,38 @@
+PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@dest1
+PREHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value
+where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@dest1
+POSTHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value
+where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@dest1
+POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT sum(hash(dest1.key,dest1.value)) FROM dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-58-16_826_6607338225059284245/-mr-10000
+POSTHOOK: query: SELECT sum(hash(dest1.key,dest1.value)) FROM dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-58-16_826_6607338225059284245/-mr-10000
+POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
+407444119660
Index: ql/src/test/results/clientpositive/auto_join4.q.out
===================================================================
--- ql/src/test/results/clientpositive/auto_join4.q.out	(revision 0)
+++ ql/src/test/results/clientpositive/auto_join4.q.out	(revision 0)
@@ -0,0 +1,54 @@
+PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@dest1
+PREHOOK: query: FROM (
+ FROM
+ (
+ FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20
+ ) a
+ LEFT OUTER JOIN
+ (
+ FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25
+ ) b
+ ON (a.c1 = b.c3)
+ SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4
+) c
+INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1
+POSTHOOK: query: FROM (
+ FROM
+ (
+ FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20
+ ) a
+ LEFT OUTER JOIN
+ (
+ FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25
+ ) b
+ ON (a.c1 = b.c3)
+ SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4
+) c
+INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1
+POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-59-39_083_7175362309149394472/-mr-10000
+POSTHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-59-39_083_7175362309149394472/-mr-10000
+POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
+5079148035
Index: ql/src/test/results/clientpositive/auto_join14.q.out
===================================================================
--- ql/src/test/results/clientpositive/auto_join14.q.out	(revision 0)
+++ ql/src/test/results/clientpositive/auto_join14.q.out	(revision 0)
@@ -0,0 +1,32 @@
+PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@dest1
+PREHOOK: query: FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100
+INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Output: default@dest1
+POSTHOOK: query: FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100
+INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@dest1
+POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c2 SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2)) FROM dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+PREHOOK: Output: file:/data/users/liyintang/hive/build/ql/scratchdir/hive_2010-11-11_13-57-37_820_570468427334583490/-mr-10000
+POSTHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2)) FROM dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+POSTHOOK: Output: file:/data/users/liyintang/hive/build/ql/scratchdir/hive_2010-11-11_13-57-37_820_570468427334583490/-mr-10000
+POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c2 SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+404554174174
Index: ql/src/test/results/clientpositive/auto_join23.q.out
===================================================================
--- ql/src/test/results/clientpositive/auto_join23.q.out	(revision 0)
+++ ql/src/test/results/clientpositive/auto_join23.q.out	(revision 0)
@@ -0,0 +1,108 @@
+PREHOOK: query: SELECT * FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 SORT BY src1.key, src1.value, src2.key, src2.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-59-07_974_2608644986614648414/-mr-10000
+POSTHOOK: query: SELECT * FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 SORT BY src1.key, src1.value, src2.key, src2.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-59-07_974_2608644986614648414/-mr-10000
+0 val_0 0 val_0
+0 val_0 0 val_0
+0 val_0 0 val_0
+0 val_0 0 val_0
+0 val_0 0 val_0
+0 val_0 0 val_0
+0 val_0 0 val_0
+0 val_0 0 val_0
+0 val_0 0 val_0
+0 val_0 2 val_2
+0 val_0 2 val_2
+0 val_0 2 val_2
+0 val_0 4 val_4
+0 val_0 4 val_4
+0 val_0 4 val_4
+0 val_0 5 val_5
+0 val_0 5 val_5
+0 val_0 5 val_5
+0 val_0 5 val_5
+0 val_0 5 val_5
+0 val_0 5 val_5
+0 val_0 5 val_5
+0 val_0 5 val_5
+0 val_0 5 val_5
+0 val_0 8 val_8
+0 val_0 8 val_8
+0 val_0 8 val_8
+0 val_0 9 val_9
+0 val_0 9 val_9
+0 val_0 9 val_9
+2 val_2 0 val_0
+2 val_2 0 val_0
+2 val_2 0 val_0
+2 val_2 2 val_2
+2 val_2 4 val_4
+2 val_2 5 val_5
+2 val_2 5 val_5
+2 val_2 5 val_5
+2 val_2 8 val_8
+2 val_2 9 val_9
+4 val_4 0 val_0
+4 val_4 0 val_0
+4 val_4 0 val_0
+4 val_4 2 val_2
+4 val_4 4 val_4
+4 val_4 5 val_5
+4 val_4 5 val_5
+4 val_4 5 val_5
+4 val_4 8 val_8
+4 val_4 9 val_9
+5 val_5 0 val_0
+5 val_5 0 val_0
+5 val_5 0 val_0
+5 val_5 0 val_0
+5 val_5 0 val_0
+5 val_5 0 val_0
+5 val_5 0 val_0
+5 val_5 0 val_0
+5 val_5 0 val_0
+5 val_5 2 val_2
+5 val_5 2 val_2
+5 val_5 2 val_2
+5
val_5 4 val_4 +5 val_5 4 val_4 +5 val_5 4 val_4 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 8 val_8 +5 val_5 8 val_8 +5 val_5 8 val_8 +5 val_5 9 val_9 +5 val_5 9 val_9 +5 val_5 9 val_9 +8 val_8 0 val_0 +8 val_8 0 val_0 +8 val_8 0 val_0 +8 val_8 2 val_2 +8 val_8 4 val_4 +8 val_8 5 val_5 +8 val_8 5 val_5 +8 val_8 5 val_5 +8 val_8 8 val_8 +8 val_8 9 val_9 +9 val_9 0 val_0 +9 val_9 0 val_0 +9 val_9 0 val_0 +9 val_9 2 val_2 +9 val_9 4 val_4 +9 val_9 5 val_5 +9 val_9 5 val_5 +9 val_9 5 val_5 +9 val_9 8 val_8 +9 val_9 9 val_9 Index: ql/src/test/results/clientpositive/auto_join5.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join5.q.out (revision 0) +++ ql/src/test/results/clientpositive/auto_join5.q.out (revision 0) @@ -0,0 +1,54 @@ +PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@dest1 +PREHOOK: query: FROM ( + FROM + ( + FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 + ) a + RIGHT OUTER JOIN + ( + FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 + ) b + ON (a.c1 = b.c3) + SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 +) c +INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest1 +POSTHOOK: query: FROM ( + FROM + ( + FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 + ) a + RIGHT OUTER JOIN + ( + FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 + ) b + ON (a.c1 = b.c3) + SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 +) c +INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dest1 +POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-59-47_350_2456339860879092146/-mr-10000 +POSTHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-59-47_350_2456339860879092146/-mr-10000 +POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +9766083196 Index: ql/src/test/results/clientpositive/auto_join15.q.out 
=================================================================== --- ql/src/test/results/clientpositive/auto_join15.q.out (revision 0) +++ ql/src/test/results/clientpositive/auto_join15.q.out (revision 0) @@ -0,0 +1,19 @@ +PREHOOK: query: select sum(hash(a.k1,a.v1,a.k2, a.v2)) +from ( +SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 +FROM src src1 JOIN src src2 ON (src1.key = src2.key) +SORT BY k1, v1, k2, v2 +) a +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-57-40_789_4897698283203870415/-mr-10000 +POSTHOOK: query: select sum(hash(a.k1,a.v1,a.k2, a.v2)) +from ( +SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 +FROM src src1 JOIN src src2 ON (src1.key = src2.key) +SORT BY k1, v1, k2, v2 +) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-57-40_789_4897698283203870415/-mr-10000 +524272996896 Index: ql/src/test/results/clientpositive/auto_join_nulls.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join_nulls.q.out (revision 0) +++ ql/src/test/results/clientpositive/auto_join_nulls.q.out (revision 0) @@ -0,0 +1,208 @@ +PREHOOK: query: CREATE TABLE myinput1(key int, value int) +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE myinput1(key int, value int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@myinput1 +PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/in1.txt' INTO TABLE myinput1 +PREHOOK: type: LOAD +POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/in1.txt' INTO TABLE myinput1 +POSTHOOK: type: LOAD +POSTHOOK: Output: default@myinput1 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-59-38_128_5721983744421015511/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-59-38_128_5721983744421015511/-mr-10000 +13630578 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-59-42_458_3857336078300862994/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-59-42_458_3857336078300862994/-mr-10000 +13630578 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-59-46_795_3295843614724111648/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-59-46_795_3295843614724111648/-mr-10000 +13630578 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: 
file:/tmp/liyintang/hive_2010-11-11_17-59-51_810_8495116173420785144/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-59-51_810_8495116173420785144/-mr-10000 +3078400 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-59-56_866_676999079349671986/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-59-56_866_676999079349671986/-mr-10000 +4509856 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-01_985_6481162350367357952/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-01_985_6481162350367357952/-mr-10000 +3112070 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-07_433_5697822947001003360/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-07_433_5697822947001003360/-mr-10000 +3078400 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-11_860_7663380874040287741/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-11_860_7663380874040287741/-mr-10000 +4542003 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-17_087_5570220930038430711/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-17_087_5570220930038430711/-mr-10000 +4542038 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-21_730_7068579215030025497/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN 
myinput1 b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-21_730_7068579215030025497/-mr-10000 +4543491 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-26_763_7077135887434220096/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-26_763_7077135887434220096/-mr-10000 +4542003 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-31_723_2846348272261226572/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-31_723_2846348272261226572/-mr-10000 +3079923 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-37_879_1948558748358099991/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-37_879_1948558748358099991/-mr-10000 +4509891 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-42_711_8448804775949567435/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-42_711_8448804775949567435/-mr-10000 +3113558 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-48_016_2711326187485329520/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-48_016_2711326187485329520/-mr-10000 +3079923 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-52_763_9087751756999134388/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value +POSTHOOK: type: QUERY 
+POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-52_763_9087751756999134388/-mr-10000 +4543526 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-58_006_5983144248451158873/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-00-58_006_5983144248451158873/-mr-10000 +4543526 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-01-03_223_3881722054830137933/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-01-03_223_3881722054830137933/-mr-10000 +4543526 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-01-07_938_7263469664808317293/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-01-07_938_7263469664808317293/-mr-10000 +4543526 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value) +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-01-12_851_3735155037354765439/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-01-12_851_3735155037354765439/-mr-10000 +3112070 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value) +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-01-17_734_3064013262164657326/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-01-17_734_3064013262164657326/-mr-10000 +3113558 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +PREHOOK: Output: 
file:/tmp/liyintang/hive_2010-11-11_18-01-22_200_5772028834197460678/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-01-22_200_5772028834197460678/-mr-10000 +3112070 Index: ql/src/test/results/clientpositive/auto_join24.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join24.q.out (revision 0) +++ ql/src/test/results/clientpositive/auto_join24.q.out (revision 0) @@ -0,0 +1,28 @@ +PREHOOK: query: create table tst1(key STRING, cnt INT) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table tst1(key STRING, cnt INT) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@tst1 +PREHOOK: query: INSERT OVERWRITE TABLE tst1 +SELECT a.key, count(1) FROM src a group by a.key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@tst1 +POSTHOOK: query: INSERT OVERWRITE TABLE tst1 +SELECT a.key, count(1) FROM src a group by a.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@tst1 +POSTHOOK: Lineage: tst1.cnt EXPRESSION [(src)a.null, ] +POSTHOOK: Lineage: tst1.key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: SELECT sum(a.cnt) FROM tst1 a JOIN tst1 b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tst1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-59-18_289_1995655856370946124/-mr-10000 +POSTHOOK: query: SELECT sum(a.cnt) FROM tst1 a JOIN tst1 b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tst1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-59-18_289_1995655856370946124/-mr-10000 +POSTHOOK: Lineage: tst1.cnt EXPRESSION [(src)a.null, ] +POSTHOOK: Lineage: tst1.key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ] +500 Index: ql/src/test/results/clientpositive/auto_join0.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join0.q.out (revision 0) +++ ql/src/test/results/clientpositive/auto_join0.q.out (revision 0) @@ -0,0 +1,25 @@ +PREHOOK: query: select sum(hash(a.k1,a.v1,a.k2, a.v2)) +from ( +SELECT src1.key as k1, src1.value as v1, + src2.key as k2, src2.value as v2 FROM + (SELECT * FROM src WHERE src.key < 10) src1 + JOIN + (SELECT * FROM src WHERE src.key < 10) src2 + SORT BY k1, v1, k2, v2 +) a +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-03-39_029_8848165047272229259/-mr-10000 +POSTHOOK: query: select sum(hash(a.k1,a.v1,a.k2, a.v2)) +from ( +SELECT src1.key as k1, src1.value as v1, + src2.key as k2, src2.value as v2 FROM + (SELECT * FROM src WHERE src.key < 10) src1 + JOIN + (SELECT * FROM src WHERE src.key < 10) src2 + SORT BY k1, v1, k2, v2 +) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-03-39_029_8848165047272229259/-mr-10000 +34441656720 Index: ql/src/test/results/clientpositive/auto_join10.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join10.q.out (revision 0) +++ ql/src/test/results/clientpositive/auto_join10.q.out (revision 0) @@ -0,0 +1,19 @@ +PREHOOK: query: FROM +(SELECT src.* FROM src) x +JOIN +(SELECT src.* FROM src) Y +ON 
(x.key = Y.key) +select sum(hash(Y.key,Y.value)) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-57-05_712_8206255717234696707/-mr-10000 +POSTHOOK: query: FROM +(SELECT src.* FROM src) x +JOIN +(SELECT src.* FROM src) Y +ON (x.key = Y.key) +select sum(hash(Y.key,Y.value)) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-57-05_712_8206255717234696707/-mr-10000 +103231310608 Index: ql/src/test/results/clientpositive/auto_join6.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join6.q.out (revision 0) +++ ql/src/test/results/clientpositive/auto_join6.q.out (revision 0) @@ -0,0 +1,54 @@ +PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@dest1 +PREHOOK: query: FROM ( + FROM + ( + FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 + ) a + FULL OUTER JOIN + ( + FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 + ) b + ON (a.c1 = b.c3) + SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 +) c +INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest1 +POSTHOOK: query: FROM ( + FROM + ( + FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 + ) a + FULL OUTER JOIN + ( + FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 + ) b + ON (a.c1 = b.c3) + SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 +) c +INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dest1 +POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-59-54_416_3923556325108323831/-mr-10000 +POSTHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-59-54_416_3923556325108323831/-mr-10000 +POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +2607643291 Index: ql/src/test/results/clientpositive/auto_join16.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join16.q.out (revision 0) +++ 
ql/src/test/results/clientpositive/auto_join16.q.out (revision 0) @@ -0,0 +1,19 @@ +PREHOOK: query: SELECT sum(hash(subq.key, tab.value)) +FROM +(select a.key, a.value from src a where a.key > 10 ) subq +JOIN src tab +ON (subq.key = tab.key and subq.key > 20 and subq.value = tab.value) +where tab.value < 200 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-57-48_197_114372071445295305/-mr-10000 +POSTHOOK: query: SELECT sum(hash(subq.key, tab.value)) +FROM +(select a.key, a.value from src a where a.key > 10 ) subq +JOIN src tab +ON (subq.key = tab.key and subq.key > 20 and subq.value = tab.value) +where tab.value < 200 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-57-48_197_114372071445295305/-mr-10000 +NULL Index: ql/src/test/results/clientpositive/auto_join25.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join25.q.out (revision 0) +++ ql/src/test/results/clientpositive/auto_join25.q.out (revision 0) @@ -0,0 +1,112 @@ +PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@dest1 +PREHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11') +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@dest1 +POSTHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@dest1 +POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest1.key,dest1.value)) FROM dest1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-12_13-09-55_396_5931103673819276275/-mr-10000 +POSTHOOK: query: SELECT sum(hash(dest1.key,dest1.value)) FROM dest1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-12_13-09-55_396_5931103673819276275/-mr-10000 +POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +407444119660 +PREHOOK: query: CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: 
Output: default@dest_j2 +POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) +INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest_j2 +POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) +INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dest_j2 +POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j2.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j2.value SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest_j2.key,dest_j2.value)) FROM dest_j2 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest_j2 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-12_13-10-05_166_1509512648391049274/-mr-10000 +POSTHOOK: query: SELECT sum(hash(dest_j2.key,dest_j2.value)) FROM dest_j2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest_j2 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-12_13-10-05_166_1509512648391049274/-mr-10000 +POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j2.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j2.value SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] +33815990627 +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@dest_j1 +POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j2.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j2.value SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest_j1 +POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dest_j1 +POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, 
comment:default), ] +POSTHOOK: Lineage: dest_j1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j2.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j2.value SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest_j1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-12_13-10-12_389_3216138172725864168/-mr-10000 +POSTHOOK: query: SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest_j1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-12_13-10-12_389_3216138172725864168/-mr-10000 +POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j2.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j2.value SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] +101861029915 Index: ql/src/test/results/clientpositive/auto_join1.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join1.q.out (revision 0) +++ ql/src/test/results/clientpositive/auto_join1.q.out (revision 0) @@ -0,0 +1,28 @@ +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@dest_j1 +PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest_j1 +POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dest_j1 +POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest_j1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-03-51_080_4066172686967477332/-mr-10000 +POSTHOOK: query: SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest_j1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-03-51_080_4066172686967477332/-mr-10000 +POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +101861029915 Index: ql/src/test/results/clientpositive/auto_join11.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join11.q.out (revision 0) +++ 
ql/src/test/results/clientpositive/auto_join11.q.out (revision 0) @@ -0,0 +1,19 @@ +PREHOOK: query: SELECT sum(hash(src1.c1, src2.c4)) +FROM +(SELECT src.key as c1, src.value as c2 from src) src1 +JOIN +(SELECT src.key as c3, src.value as c4 from src) src2 +ON src1.c1 = src2.c3 AND src1.c1 < 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-57-10_986_3692178789106531054/-mr-10000 +POSTHOOK: query: SELECT sum(hash(src1.c1, src2.c4)) +FROM +(SELECT src.key as c1, src.value as c2 from src) src1 +JOIN +(SELECT src.key as c3, src.value as c4 from src) src2 +ON src1.c1 = src2.c3 AND src1.c1 < 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-57-10_986_3692178789106531054/-mr-10000 +-101333194320 Index: ql/src/test/results/clientpositive/auto_join20.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join20.q.out (revision 0) +++ ql/src/test/results/clientpositive/auto_join20.q.out (revision 0) @@ -0,0 +1,38 @@ +PREHOOK: query: select sum(hash(a.k1,a.v1,a.k2,a.v2,a.k3,a.v3)) +from ( +SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 , src3.key as k3, src3.value as v3 +FROM src src1 JOIN src src2 ON (src1.key = src2.key AND src1.key < 10) RIGHT OUTER JOIN src src3 ON (src1.key = src3.key AND src3.key < 20) +SORT BY k1,v1,k2,v2,k3,v3 +)a +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-58-31_391_2561134591701845613/-mr-10000 +POSTHOOK: query: select sum(hash(a.k1,a.v1,a.k2,a.v2,a.k3,a.v3)) +from ( +SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 , src3.key as k3, src3.value as v3 +FROM src src1 JOIN src src2 ON (src1.key = src2.key AND src1.key < 10) RIGHT OUTER JOIN src src3 ON (src1.key = src3.key AND src3.key < 20) +SORT BY k1,v1,k2,v2,k3,v3 +)a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-58-31_391_2561134591701845613/-mr-10000 +56157587016 +PREHOOK: query: select sum(hash(a.k1,a.v1,a.k2,a.v2,a.k3,a.v3)) +from ( +SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 , src3.key as k3, src3.value as v3 +FROM src src1 JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key < 15) RIGHT OUTER JOIN src src3 ON (src1.key = src3.key AND src3.key < 20) +SORT BY k1,v1,k2,v2,k3,v3 +)a +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-58-40_953_5141108862574850381/-mr-10000 +POSTHOOK: query: select sum(hash(a.k1,a.v1,a.k2,a.v2,a.k3,a.v3)) +from ( +SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 , src3.key as k3, src3.value as v3 +FROM src src1 JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key < 15) RIGHT OUTER JOIN src src3 ON (src1.key = src3.key AND src3.key < 20) +SORT BY k1,v1,k2,v2,k3,v3 +)a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-58-40_953_5141108862574850381/-mr-10000 +56157587016 Index: ql/src/test/results/clientpositive/auto_join7.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join7.q.out (revision 0) +++ ql/src/test/results/clientpositive/auto_join7.q.out (revision 0) @@ -0,0 +1,68 @@ +PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING, c5 
INT, c6 STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING, c5 INT, c6 STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@dest1 +PREHOOK: query: FROM ( + FROM + ( + FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 + ) a + FULL OUTER JOIN + ( + FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 + ) b + ON (a.c1 = b.c3) + LEFT OUTER JOIN + ( + FROM src src3 SELECT src3.key AS c5, src3.value AS c6 WHERE src3.key > 20 and src3.key < 25 + ) c + ON (a.c1 = c.c5) + SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4, c.c5 AS c5, c.c6 AS c6 +) c +INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest1 +POSTHOOK: query: FROM ( + FROM + ( + FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 + ) a + FULL OUTER JOIN + ( + FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 + ) b + ON (a.c1 = b.c3) + LEFT OUTER JOIN + ( + FROM src src3 SELECT src3.key AS c5, src3.value AS c6 WHERE src3.key > 20 and src3.key < 25 + ) c + ON (a.c1 = c.c5) + SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4, c.c5 AS c5, c.c6 AS c6 +) c +INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dest1 +POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c5 EXPRESSION [(src)src3.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c6 SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4,dest1.c5,dest1.c6)) FROM dest1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_14-00-02_419_6381793453484760735/-mr-10000 +POSTHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4,dest1.c5,dest1.c6)) FROM dest1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_14-00-02_419_6381793453484760735/-mr-10000 +POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c5 EXPRESSION [(src)src3.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c6 SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] +-5178357269 Index: ql/src/test/results/clientpositive/auto_join17.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join17.q.out (revision 0) +++ 
ql/src/test/results/clientpositive/auto_join17.q.out (revision 0) @@ -0,0 +1,32 @@ +PREHOOK: query: CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@dest1 +PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.* +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest1 +POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.* +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dest1 +POSTHOOK: Lineage: dest1.key1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.key2 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.value1 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.value2 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest1.key1,dest1.value1,dest1.key2,dest1.value2)) FROM dest1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-57-58_318_8160544782535342891/-mr-10000 +POSTHOOK: query: SELECT sum(hash(dest1.key1,dest1.value1,dest1.key2,dest1.value2)) FROM dest1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-57-58_318_8160544782535342891/-mr-10000 +POSTHOOK: Lineage: dest1.key1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.key2 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.value1 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.value2 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +-793937029770 Index: ql/src/test/results/clientpositive/auto_join18_multi_distinct.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join18_multi_distinct.q.out (revision 0) +++ ql/src/test/results/clientpositive/auto_join18_multi_distinct.q.out (revision 0) @@ -0,0 +1,33 @@ +PREHOOK: query: SELECT sum(hash(a.key, a.value, b.key, b.value1, b.value2)) + FROM + ( + SELECT src1.key as key, count(src1.value) AS value FROM src src1 group by src1.key + ) a + FULL OUTER JOIN + ( + SELECT src2.key as key, count(distinct(src2.value)) AS value1, + count(distinct(src2.key)) AS value2 + FROM src1 src2 group by src2.key + ) b + ON (a.key = b.key) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@src1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_17-49-27_871_4849921319201129580/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key, a.value, b.key, b.value1, b.value2)) + FROM + ( + SELECT src1.key as key, count(src1.value) AS value FROM src src1 group by src1.key + ) a + FULL OUTER JOIN + ( + SELECT src2.key as key, count(distinct(src2.value)) AS value1, + count(distinct(src2.key)) AS value2 + FROM src1 src2 group by src2.key + ) b + ON (a.key = b.key) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@src1 +POSTHOOK: Output: 
file:/tmp/liyintang/hive_2010-11-11_17-49-27_871_4849921319201129580/-mr-10000 +14748607855 Index: ql/src/test/results/clientpositive/auto_join2.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join2.q.out (revision 0) +++ ql/src/test/results/clientpositive/auto_join2.q.out (revision 0) @@ -0,0 +1,28 @@ +PREHOOK: query: CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@dest_j2 +PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) +INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest_j2 +POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) +INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dest_j2 +POSTHOOK: Lineage: dest_j2.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j2.value SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest_j2.key,dest_j2.value)) FROM dest_j2 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest_j2 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-04-00_455_5237938999562224952/-mr-10000 +POSTHOOK: query: SELECT sum(hash(dest_j2.key,dest_j2.value)) FROM dest_j2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest_j2 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-04-00_455_5237938999562224952/-mr-10000 +POSTHOOK: Lineage: dest_j2.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j2.value SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] +33815990627 Index: ql/src/test/results/clientpositive/auto_join12.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join12.q.out (revision 0) +++ ql/src/test/results/clientpositive/auto_join12.q.out (revision 0) @@ -0,0 +1,25 @@ +PREHOOK: query: SELECT sum(hash(src1.c1, src2.c4)) +FROM +(SELECT src.key as c1, src.value as c2 from src) src1 +JOIN +(SELECT src.key as c3, src.value as c4 from src) src2 +ON src1.c1 = src2.c3 AND src1.c1 < 100 +JOIN +(SELECT src.key as c5, src.value as c6 from src) src3 +ON src1.c1 = src3.c5 AND src3.c5 < 80 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-57-16_550_6307908137223409683/-mr-10000 +POSTHOOK: query: SELECT sum(hash(src1.c1, src2.c4)) +FROM +(SELECT src.key as c1, src.value as c2 from src) src1 +JOIN +(SELECT src.key as c3, src.value as c4 from src) src2 +ON src1.c1 = src2.c3 AND src1.c1 < 100 +JOIN +(SELECT src.key as c5, src.value as c6 from src) src3 +ON src1.c1 = src3.c5 AND src3.c5 < 80 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-57-16_550_6307908137223409683/-mr-10000 +-136843922952 Index: ql/src/test/results/clientpositive/auto_join21.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join21.q.out (revision 0) +++ ql/src/test/results/clientpositive/auto_join21.q.out 
(revision 0) @@ -0,0 +1,2614 @@ +PREHOOK: query: SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-04-03_499_4254989401378766755/-mr-10000 +POSTHOOK: query: SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_18-04-03_499_4254989401378766755/-mr-10000 +NULL NULL NULL NULL 0 val_0 +NULL NULL NULL NULL 0 val_0 +NULL NULL NULL NULL 0 val_0 +NULL NULL NULL NULL 10 val_10 +NULL NULL NULL NULL 100 val_100 +NULL NULL NULL NULL 100 val_100 +NULL NULL NULL NULL 100 val_100 +NULL NULL NULL NULL 100 val_100 +NULL NULL NULL NULL 100 val_100 +NULL NULL NULL NULL 100 val_100 +NULL NULL NULL NULL 100 val_100 +NULL NULL NULL NULL 100 val_100 +NULL NULL NULL NULL 103 val_103 +NULL NULL NULL NULL 103 val_103 +NULL NULL NULL NULL 103 val_103 +NULL NULL NULL NULL 103 val_103 +NULL NULL NULL NULL 103 val_103 +NULL NULL NULL NULL 103 val_103 +NULL NULL NULL NULL 103 val_103 +NULL NULL NULL NULL 103 val_103 +NULL NULL NULL NULL 104 val_104 +NULL NULL NULL NULL 104 val_104 +NULL NULL NULL NULL 104 val_104 +NULL NULL NULL NULL 104 val_104 +NULL NULL NULL NULL 104 val_104 +NULL NULL NULL NULL 104 val_104 +NULL NULL NULL NULL 104 val_104 +NULL NULL NULL NULL 104 val_104 +NULL NULL NULL NULL 105 val_105 +NULL NULL NULL NULL 11 val_11 +NULL NULL NULL NULL 111 val_111 +NULL NULL NULL NULL 113 val_113 +NULL NULL NULL NULL 113 val_113 +NULL NULL NULL NULL 113 val_113 +NULL NULL NULL NULL 113 val_113 +NULL NULL NULL NULL 113 val_113 +NULL NULL NULL NULL 113 val_113 +NULL NULL NULL NULL 113 val_113 +NULL NULL NULL NULL 113 val_113 +NULL NULL NULL NULL 114 val_114 +NULL NULL NULL NULL 116 val_116 +NULL NULL NULL NULL 118 val_118 +NULL NULL NULL NULL 118 val_118 +NULL NULL NULL NULL 118 val_118 +NULL NULL NULL NULL 118 val_118 +NULL NULL NULL NULL 118 val_118 +NULL NULL NULL NULL 118 val_118 +NULL NULL NULL NULL 118 val_118 +NULL NULL NULL NULL 118 val_118 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 119 val_119 +NULL NULL NULL NULL 12 val_12 +NULL NULL NULL NULL 12 val_12 +NULL NULL NULL NULL 12 val_12 +NULL NULL NULL NULL 12 val_12 +NULL NULL NULL NULL 12 val_12 +NULL NULL NULL NULL 12 val_12 +NULL 
NULL NULL NULL 12 val_12 +NULL NULL NULL NULL 12 val_12 +NULL NULL NULL NULL 120 val_120 +NULL NULL NULL NULL 120 val_120 +NULL NULL NULL NULL 120 val_120 +NULL NULL NULL NULL 120 val_120 +NULL NULL NULL NULL 120 val_120 +NULL NULL NULL NULL 120 val_120 +NULL NULL NULL NULL 120 val_120 +NULL NULL NULL NULL 120 val_120 +NULL NULL NULL NULL 125 val_125 +NULL NULL NULL NULL 125 val_125 +NULL NULL NULL NULL 125 val_125 +NULL NULL NULL NULL 125 val_125 +NULL NULL NULL NULL 125 val_125 +NULL NULL NULL NULL 125 val_125 +NULL NULL NULL NULL 125 val_125 +NULL NULL NULL NULL 125 val_125 +NULL NULL NULL NULL 126 val_126 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 128 val_128 +NULL NULL NULL NULL 129 val_129 +NULL NULL NULL NULL 129 val_129 +NULL NULL NULL NULL 129 val_129 +NULL NULL NULL NULL 129 val_129 +NULL NULL NULL NULL 129 val_129 +NULL NULL NULL NULL 129 val_129 +NULL NULL NULL NULL 129 val_129 +NULL NULL NULL NULL 129 val_129 +NULL NULL NULL NULL 131 val_131 +NULL NULL NULL NULL 133 val_133 +NULL NULL NULL NULL 134 val_134 +NULL NULL NULL NULL 134 val_134 +NULL NULL NULL NULL 134 val_134 +NULL NULL NULL NULL 134 val_134 +NULL NULL NULL NULL 134 val_134 +NULL NULL NULL NULL 134 val_134 +NULL NULL NULL NULL 134 val_134 +NULL NULL NULL NULL 134 val_134 +NULL NULL NULL NULL 136 val_136 +NULL NULL NULL NULL 137 val_137 +NULL NULL NULL NULL 137 val_137 +NULL NULL NULL NULL 137 val_137 +NULL NULL NULL NULL 137 val_137 +NULL NULL NULL NULL 137 val_137 +NULL NULL NULL NULL 137 val_137 +NULL NULL NULL NULL 137 val_137 +NULL NULL NULL NULL 137 val_137 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 
+NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 138 val_138 +NULL NULL NULL NULL 143 val_143 +NULL NULL NULL NULL 145 val_145 +NULL NULL NULL NULL 146 val_146 +NULL NULL NULL NULL 146 val_146 +NULL NULL NULL NULL 146 val_146 +NULL NULL NULL NULL 146 val_146 +NULL NULL NULL NULL 146 val_146 +NULL NULL NULL NULL 146 val_146 +NULL NULL NULL NULL 146 val_146 +NULL NULL NULL NULL 146 val_146 +NULL NULL NULL NULL 149 val_149 +NULL NULL NULL NULL 149 val_149 +NULL NULL NULL NULL 149 val_149 +NULL NULL NULL NULL 149 val_149 +NULL NULL NULL NULL 149 val_149 +NULL NULL NULL NULL 149 val_149 +NULL NULL NULL NULL 149 val_149 +NULL NULL NULL NULL 149 val_149 +NULL NULL NULL NULL 15 val_15 +NULL NULL NULL NULL 15 val_15 +NULL NULL NULL NULL 15 val_15 +NULL NULL NULL NULL 15 val_15 +NULL NULL NULL NULL 15 val_15 +NULL NULL NULL NULL 15 val_15 +NULL NULL NULL NULL 15 val_15 +NULL NULL NULL NULL 15 val_15 +NULL NULL NULL NULL 150 val_150 +NULL NULL NULL NULL 152 val_152 +NULL NULL NULL NULL 152 val_152 +NULL NULL NULL NULL 152 val_152 +NULL NULL NULL NULL 152 val_152 +NULL NULL NULL NULL 152 val_152 +NULL NULL NULL NULL 152 val_152 +NULL NULL NULL NULL 152 val_152 +NULL NULL NULL NULL 152 val_152 +NULL NULL NULL NULL 153 val_153 +NULL NULL NULL NULL 155 val_155 +NULL NULL NULL NULL 156 val_156 +NULL NULL NULL NULL 157 val_157 +NULL NULL NULL NULL 158 val_158 +NULL NULL NULL NULL 160 val_160 +NULL NULL NULL NULL 162 val_162 +NULL NULL NULL NULL 163 val_163 +NULL NULL NULL NULL 164 val_164 +NULL NULL NULL NULL 164 val_164 +NULL NULL NULL NULL 164 val_164 +NULL NULL NULL NULL 164 val_164 +NULL NULL NULL NULL 164 val_164 +NULL NULL NULL NULL 164 val_164 +NULL NULL NULL NULL 164 val_164 +NULL NULL NULL NULL 164 val_164 +NULL NULL NULL NULL 165 val_165 +NULL NULL NULL NULL 165 val_165 +NULL NULL NULL NULL 165 val_165 +NULL NULL NULL NULL 165 val_165 +NULL NULL NULL NULL 165 val_165 +NULL NULL NULL NULL 165 val_165 +NULL NULL NULL NULL 165 val_165 +NULL NULL NULL NULL 165 val_165 +NULL NULL NULL NULL 166 val_166 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL 
NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 167 val_167 +NULL NULL NULL NULL 168 val_168 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 169 val_169 +NULL NULL NULL NULL 17 val_17 +NULL NULL NULL NULL 170 val_170 +NULL NULL NULL NULL 172 val_172 +NULL NULL NULL NULL 172 val_172 +NULL NULL NULL NULL 172 val_172 +NULL NULL NULL NULL 172 val_172 +NULL NULL NULL NULL 172 val_172 +NULL NULL NULL NULL 172 val_172 +NULL NULL NULL NULL 172 val_172 +NULL NULL NULL NULL 172 val_172 +NULL NULL NULL NULL 174 val_174 +NULL NULL NULL NULL 174 val_174 +NULL NULL NULL NULL 174 val_174 +NULL NULL NULL NULL 174 val_174 +NULL NULL NULL NULL 174 val_174 +NULL NULL NULL NULL 174 val_174 +NULL NULL NULL NULL 174 val_174 +NULL NULL NULL NULL 174 val_174 +NULL NULL NULL NULL 175 val_175 +NULL NULL NULL NULL 175 val_175 +NULL NULL NULL NULL 175 val_175 +NULL NULL NULL NULL 175 val_175 +NULL NULL NULL NULL 175 val_175 +NULL NULL NULL NULL 175 val_175 +NULL NULL NULL NULL 175 val_175 +NULL NULL NULL NULL 175 val_175 +NULL NULL NULL NULL 176 val_176 +NULL NULL NULL NULL 176 val_176 +NULL NULL NULL NULL 176 val_176 +NULL NULL NULL NULL 176 val_176 +NULL NULL NULL NULL 176 val_176 +NULL NULL NULL NULL 176 val_176 +NULL NULL NULL NULL 176 val_176 +NULL NULL NULL NULL 176 val_176 +NULL NULL NULL NULL 177 
val_177 +NULL NULL NULL NULL 178 val_178 +NULL NULL NULL NULL 179 val_179 +NULL NULL NULL NULL 179 val_179 +NULL NULL NULL NULL 179 val_179 +NULL NULL NULL NULL 179 val_179 +NULL NULL NULL NULL 179 val_179 +NULL NULL NULL NULL 179 val_179 +NULL NULL NULL NULL 179 val_179 +NULL NULL NULL NULL 179 val_179 +NULL NULL NULL NULL 18 val_18 +NULL NULL NULL NULL 18 val_18 +NULL NULL NULL NULL 18 val_18 +NULL NULL NULL NULL 18 val_18 +NULL NULL NULL NULL 18 val_18 +NULL NULL NULL NULL 18 val_18 +NULL NULL NULL NULL 18 val_18 +NULL NULL NULL NULL 18 val_18 +NULL NULL NULL NULL 180 val_180 +NULL NULL NULL NULL 181 val_181 +NULL NULL NULL NULL 183 val_183 +NULL NULL NULL NULL 186 val_186 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 187 val_187 +NULL NULL NULL NULL 189 val_189 +NULL NULL NULL NULL 19 val_19 +NULL NULL NULL NULL 190 val_190 +NULL NULL NULL NULL 191 val_191 +NULL NULL NULL NULL 191 val_191 +NULL NULL NULL NULL 191 val_191 +NULL NULL NULL NULL 191 val_191 +NULL NULL NULL NULL 191 val_191 +NULL NULL NULL NULL 191 val_191 +NULL NULL NULL NULL 191 val_191 +NULL NULL NULL NULL 191 val_191 +NULL NULL NULL NULL 192 val_192 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 193 val_193 +NULL NULL NULL NULL 194 val_194 +NULL NULL NULL NULL 195 val_195 +NULL NULL NULL NULL 195 val_195 +NULL NULL NULL NULL 195 val_195 +NULL NULL NULL NULL 195 val_195 +NULL NULL NULL NULL 195 val_195 +NULL NULL NULL NULL 195 val_195 +NULL NULL NULL NULL 195 val_195 +NULL NULL NULL NULL 195 val_195 +NULL NULL NULL NULL 196 val_196 +NULL NULL NULL NULL 197 val_197 +NULL NULL NULL NULL 197 val_197 +NULL NULL NULL NULL 197 val_197 +NULL NULL NULL NULL 197 val_197 +NULL NULL NULL NULL 197 val_197 +NULL NULL NULL NULL 197 val_197 +NULL NULL NULL NULL 197 val_197 +NULL NULL NULL NULL 197 val_197 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 
+NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 199 val_199 +NULL NULL NULL NULL 2 val_2 +NULL NULL NULL NULL 20 val_20 +NULL NULL NULL NULL 200 val_200 +NULL NULL NULL NULL 200 val_200 +NULL NULL NULL NULL 200 val_200 +NULL NULL NULL NULL 200 val_200 +NULL NULL NULL NULL 200 val_200 +NULL NULL NULL NULL 200 val_200 +NULL NULL NULL NULL 200 val_200 +NULL NULL NULL NULL 200 val_200 +NULL NULL NULL NULL 201 val_201 +NULL NULL NULL NULL 202 val_202 +NULL NULL NULL NULL 203 val_203 +NULL NULL NULL NULL 203 val_203 +NULL NULL NULL NULL 203 val_203 +NULL NULL NULL NULL 203 val_203 +NULL NULL NULL NULL 203 val_203 +NULL NULL NULL NULL 203 val_203 +NULL NULL NULL NULL 203 val_203 +NULL NULL NULL NULL 203 val_203 +NULL NULL NULL NULL 205 val_205 +NULL NULL NULL NULL 205 val_205 +NULL NULL NULL NULL 205 val_205 +NULL NULL NULL NULL 205 val_205 +NULL NULL NULL NULL 205 val_205 +NULL NULL NULL NULL 205 val_205 +NULL NULL NULL NULL 205 val_205 +NULL NULL NULL NULL 205 val_205 +NULL NULL NULL NULL 207 val_207 +NULL NULL NULL NULL 207 val_207 +NULL NULL NULL NULL 207 val_207 +NULL NULL NULL NULL 207 val_207 +NULL NULL NULL NULL 207 val_207 +NULL NULL NULL NULL 207 val_207 +NULL NULL NULL NULL 207 val_207 +NULL NULL NULL NULL 207 val_207 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 208 val_208 +NULL NULL NULL NULL 209 val_209 +NULL NULL NULL NULL 209 val_209 +NULL NULL NULL NULL 209 val_209 +NULL NULL NULL NULL 209 val_209 +NULL NULL NULL NULL 209 val_209 +NULL NULL NULL NULL 209 val_209 +NULL NULL NULL NULL 209 val_209 +NULL NULL NULL NULL 209 val_209 +NULL NULL NULL NULL 213 val_213 +NULL NULL NULL NULL 213 val_213 +NULL NULL NULL NULL 213 val_213 +NULL NULL NULL NULL 213 val_213 +NULL NULL NULL NULL 213 val_213 +NULL NULL NULL NULL 213 val_213 +NULL NULL NULL NULL 213 val_213 +NULL NULL NULL NULL 213 val_213 +NULL NULL NULL NULL 214 val_214 +NULL NULL NULL NULL 216 val_216 +NULL NULL NULL NULL 216 val_216 +NULL NULL NULL NULL 216 val_216 +NULL NULL NULL NULL 216 
val_216 +NULL NULL NULL NULL 216 val_216 +NULL NULL NULL NULL 216 val_216 +NULL NULL NULL NULL 216 val_216 +NULL NULL NULL NULL 216 val_216 +NULL NULL NULL NULL 217 val_217 +NULL NULL NULL NULL 217 val_217 +NULL NULL NULL NULL 217 val_217 +NULL NULL NULL NULL 217 val_217 +NULL NULL NULL NULL 217 val_217 +NULL NULL NULL NULL 217 val_217 +NULL NULL NULL NULL 217 val_217 +NULL NULL NULL NULL 217 val_217 +NULL NULL NULL NULL 218 val_218 +NULL NULL NULL NULL 219 val_219 +NULL NULL NULL NULL 219 val_219 +NULL NULL NULL NULL 219 val_219 +NULL NULL NULL NULL 219 val_219 +NULL NULL NULL NULL 219 val_219 +NULL NULL NULL NULL 219 val_219 +NULL NULL NULL NULL 219 val_219 +NULL NULL NULL NULL 219 val_219 +NULL NULL NULL NULL 221 val_221 +NULL NULL NULL NULL 221 val_221 +NULL NULL NULL NULL 221 val_221 +NULL NULL NULL NULL 221 val_221 +NULL NULL NULL NULL 221 val_221 +NULL NULL NULL NULL 221 val_221 +NULL NULL NULL NULL 221 val_221 +NULL NULL NULL NULL 221 val_221 +NULL NULL NULL NULL 222 val_222 +NULL NULL NULL NULL 223 val_223 +NULL NULL NULL NULL 223 val_223 +NULL NULL NULL NULL 223 val_223 +NULL NULL NULL NULL 223 val_223 +NULL NULL NULL NULL 223 val_223 +NULL NULL NULL NULL 223 val_223 +NULL NULL NULL NULL 223 val_223 +NULL NULL NULL NULL 223 val_223 +NULL NULL NULL NULL 224 val_224 +NULL NULL NULL NULL 224 val_224 +NULL NULL NULL NULL 224 val_224 +NULL NULL NULL NULL 224 val_224 +NULL NULL NULL NULL 224 val_224 +NULL NULL NULL NULL 224 val_224 +NULL NULL NULL NULL 224 val_224 +NULL NULL NULL NULL 224 val_224 +NULL NULL NULL NULL 226 val_226 +NULL NULL NULL NULL 228 val_228 +NULL NULL NULL NULL 229 val_229 +NULL NULL NULL NULL 229 val_229 +NULL NULL NULL NULL 229 val_229 +NULL NULL NULL NULL 229 val_229 +NULL NULL NULL NULL 229 val_229 +NULL NULL NULL NULL 229 val_229 +NULL NULL NULL NULL 229 val_229 +NULL NULL NULL NULL 229 val_229 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL 
NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 230 val_230 +NULL NULL NULL NULL 233 val_233 +NULL NULL NULL NULL 233 val_233 +NULL NULL NULL NULL 233 val_233 +NULL NULL NULL NULL 233 val_233 +NULL NULL NULL NULL 233 val_233 +NULL NULL NULL NULL 233 val_233 +NULL NULL NULL NULL 233 val_233 +NULL NULL NULL NULL 233 val_233 +NULL NULL NULL NULL 235 val_235 +NULL NULL NULL NULL 237 val_237 +NULL NULL NULL NULL 237 val_237 +NULL NULL NULL NULL 237 val_237 +NULL NULL NULL NULL 237 val_237 +NULL NULL NULL NULL 237 val_237 +NULL NULL NULL NULL 237 val_237 +NULL NULL NULL NULL 237 val_237 +NULL NULL NULL NULL 237 val_237 +NULL NULL NULL NULL 238 val_238 +NULL NULL NULL NULL 238 val_238 +NULL NULL NULL NULL 238 val_238 +NULL NULL NULL NULL 238 val_238 +NULL NULL NULL NULL 238 val_238 +NULL NULL NULL NULL 238 val_238 +NULL NULL NULL NULL 238 val_238 +NULL NULL NULL NULL 238 val_238 +NULL NULL NULL NULL 239 val_239 +NULL NULL NULL NULL 239 val_239 +NULL NULL NULL NULL 239 val_239 +NULL NULL NULL NULL 239 val_239 +NULL NULL NULL NULL 239 val_239 +NULL NULL NULL NULL 239 val_239 +NULL NULL NULL NULL 239 val_239 +NULL NULL NULL NULL 239 val_239 +NULL NULL NULL NULL 24 val_24 +NULL 
NULL NULL NULL 24 val_24 +NULL NULL NULL NULL 24 val_24 +NULL NULL NULL NULL 24 val_24 +NULL NULL NULL NULL 24 val_24 +NULL NULL NULL NULL 24 val_24 +NULL NULL NULL NULL 24 val_24 +NULL NULL NULL NULL 24 val_24 +NULL NULL NULL NULL 241 val_241 +NULL NULL NULL NULL 242 val_242 +NULL NULL NULL NULL 242 val_242 +NULL NULL NULL NULL 242 val_242 +NULL NULL NULL NULL 242 val_242 +NULL NULL NULL NULL 242 val_242 +NULL NULL NULL NULL 242 val_242 +NULL NULL NULL NULL 242 val_242 +NULL NULL NULL NULL 242 val_242 +NULL NULL NULL NULL 244 val_244 +NULL NULL NULL NULL 247 val_247 +NULL NULL NULL NULL 248 val_248 +NULL NULL NULL NULL 249 val_249 +NULL NULL NULL NULL 252 val_252 +NULL NULL NULL NULL 255 val_255 +NULL NULL NULL NULL 255 val_255 +NULL NULL NULL NULL 255 val_255 +NULL NULL NULL NULL 255 val_255 +NULL NULL NULL NULL 255 val_255 +NULL NULL NULL NULL 255 val_255 +NULL NULL NULL NULL 255 val_255 +NULL NULL NULL NULL 255 val_255 +NULL NULL NULL NULL 256 val_256 +NULL NULL NULL NULL 256 val_256 +NULL NULL NULL NULL 256 val_256 +NULL NULL NULL NULL 256 val_256 +NULL NULL NULL NULL 256 val_256 +NULL NULL NULL NULL 256 val_256 +NULL NULL NULL NULL 256 val_256 +NULL NULL NULL NULL 256 val_256 +NULL NULL NULL NULL 257 val_257 +NULL NULL NULL NULL 258 val_258 +NULL NULL NULL NULL 26 val_26 +NULL NULL NULL NULL 26 val_26 +NULL NULL NULL NULL 26 val_26 +NULL NULL NULL NULL 26 val_26 +NULL NULL NULL NULL 26 val_26 +NULL NULL NULL NULL 26 val_26 +NULL NULL NULL NULL 26 val_26 +NULL NULL NULL NULL 26 val_26 +NULL NULL NULL NULL 260 val_260 +NULL NULL NULL NULL 262 val_262 +NULL NULL NULL NULL 263 val_263 +NULL NULL NULL NULL 265 val_265 +NULL NULL NULL NULL 265 val_265 +NULL NULL NULL NULL 265 val_265 +NULL NULL NULL NULL 265 val_265 +NULL NULL NULL NULL 265 val_265 +NULL NULL NULL NULL 265 val_265 +NULL NULL NULL NULL 265 val_265 +NULL NULL NULL NULL 265 val_265 +NULL NULL NULL NULL 266 val_266 +NULL NULL NULL NULL 27 val_27 +NULL NULL NULL NULL 272 val_272 +NULL NULL NULL NULL 272 val_272 +NULL NULL NULL NULL 272 val_272 +NULL NULL NULL NULL 272 val_272 +NULL NULL NULL NULL 272 val_272 +NULL NULL NULL NULL 272 val_272 +NULL NULL NULL NULL 272 val_272 +NULL NULL NULL NULL 272 val_272 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 273 val_273 +NULL NULL NULL NULL 274 val_274 +NULL NULL NULL NULL 275 val_275 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 
val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 277 val_277 +NULL NULL NULL NULL 278 val_278 +NULL NULL NULL NULL 278 val_278 +NULL NULL NULL NULL 278 val_278 +NULL NULL NULL NULL 278 val_278 +NULL NULL NULL NULL 278 val_278 +NULL NULL NULL NULL 278 val_278 +NULL NULL NULL NULL 278 val_278 +NULL NULL NULL NULL 278 val_278 +NULL NULL NULL NULL 28 val_28 +NULL NULL NULL NULL 280 val_280 +NULL NULL NULL NULL 280 val_280 +NULL NULL NULL NULL 280 val_280 +NULL NULL NULL NULL 280 val_280 +NULL NULL NULL NULL 280 val_280 +NULL NULL NULL NULL 280 val_280 +NULL NULL NULL NULL 280 val_280 +NULL NULL NULL NULL 280 val_280 +NULL NULL NULL NULL 281 val_281 +NULL NULL NULL NULL 281 val_281 +NULL NULL NULL NULL 281 val_281 +NULL NULL NULL NULL 281 val_281 +NULL NULL NULL NULL 281 val_281 +NULL NULL NULL NULL 281 val_281 +NULL NULL NULL NULL 281 val_281 +NULL NULL NULL NULL 281 val_281 +NULL NULL NULL NULL 282 val_282 +NULL NULL NULL NULL 282 val_282 +NULL NULL NULL NULL 282 val_282 +NULL NULL NULL NULL 282 val_282 +NULL NULL NULL NULL 282 val_282 +NULL NULL NULL NULL 282 val_282 +NULL NULL NULL NULL 282 val_282 +NULL NULL NULL NULL 282 val_282 +NULL NULL NULL NULL 283 val_283 +NULL NULL NULL NULL 284 val_284 +NULL NULL NULL NULL 285 val_285 +NULL NULL NULL NULL 286 val_286 +NULL NULL NULL NULL 287 val_287 +NULL NULL NULL NULL 288 val_288 +NULL NULL NULL NULL 288 val_288 +NULL NULL NULL NULL 288 val_288 +NULL NULL NULL NULL 288 val_288 +NULL NULL NULL NULL 288 val_288 +NULL NULL NULL NULL 288 val_288 +NULL NULL NULL NULL 288 val_288 +NULL NULL NULL NULL 288 val_288 +NULL NULL NULL NULL 289 val_289 +NULL NULL NULL NULL 291 val_291 +NULL NULL NULL NULL 292 val_292 +NULL NULL NULL NULL 296 val_296 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL 
NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 298 val_298 +NULL NULL NULL NULL 30 val_30 +NULL NULL NULL NULL 302 val_302 +NULL NULL NULL NULL 305 val_305 +NULL NULL NULL NULL 306 val_306 +NULL NULL NULL NULL 307 val_307 +NULL NULL NULL NULL 307 val_307 +NULL NULL NULL NULL 307 val_307 +NULL NULL NULL NULL 307 val_307 +NULL NULL NULL NULL 307 val_307 +NULL NULL NULL NULL 307 val_307 +NULL NULL NULL NULL 307 val_307 +NULL NULL NULL NULL 307 val_307 +NULL NULL NULL NULL 308 val_308 +NULL NULL NULL NULL 309 val_309 +NULL NULL NULL NULL 309 val_309 +NULL NULL NULL NULL 309 val_309 +NULL NULL NULL NULL 309 val_309 +NULL NULL NULL NULL 309 val_309 +NULL NULL NULL NULL 309 val_309 +NULL NULL NULL NULL 309 val_309 +NULL NULL NULL NULL 309 val_309 +NULL NULL NULL NULL 310 val_310 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 311 val_311 +NULL NULL NULL NULL 315 val_315 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 316 val_316 +NULL NULL NULL NULL 317 val_317 +NULL NULL NULL NULL 317 val_317 +NULL NULL NULL NULL 317 val_317 +NULL NULL NULL NULL 317 val_317 +NULL NULL NULL NULL 317 val_317 +NULL NULL NULL NULL 317 val_317 +NULL NULL NULL NULL 317 val_317 +NULL NULL NULL NULL 317 val_317 +NULL NULL NULL NULL 318 val_318 +NULL 
NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 318 val_318 +NULL NULL NULL NULL 321 val_321 +NULL NULL NULL NULL 321 val_321 +NULL NULL NULL NULL 321 val_321 +NULL NULL NULL NULL 321 val_321 +NULL NULL NULL NULL 321 val_321 +NULL NULL NULL NULL 321 val_321 +NULL NULL NULL NULL 321 val_321 +NULL NULL NULL NULL 321 val_321 +NULL NULL NULL NULL 322 val_322 +NULL NULL NULL NULL 322 val_322 +NULL NULL NULL NULL 322 val_322 +NULL NULL NULL NULL 322 val_322 +NULL NULL NULL NULL 322 val_322 +NULL NULL NULL NULL 322 val_322 +NULL NULL NULL NULL 322 val_322 +NULL NULL NULL NULL 322 val_322 +NULL NULL NULL NULL 323 val_323 +NULL NULL NULL NULL 325 val_325 +NULL NULL NULL NULL 325 val_325 +NULL NULL NULL NULL 325 val_325 +NULL NULL NULL NULL 325 val_325 +NULL NULL NULL NULL 325 val_325 +NULL NULL NULL NULL 325 val_325 +NULL NULL NULL NULL 325 val_325 +NULL NULL NULL NULL 325 val_325 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 327 val_327 +NULL NULL NULL NULL 33 val_33 +NULL NULL NULL NULL 331 val_331 +NULL NULL NULL NULL 331 val_331 +NULL NULL NULL NULL 331 val_331 +NULL NULL NULL NULL 331 val_331 +NULL NULL NULL NULL 331 val_331 +NULL NULL NULL NULL 331 val_331 +NULL NULL NULL NULL 331 val_331 +NULL NULL NULL NULL 331 val_331 +NULL NULL NULL NULL 332 val_332 +NULL NULL NULL NULL 333 val_333 +NULL NULL NULL NULL 333 val_333 +NULL NULL NULL NULL 333 val_333 +NULL NULL NULL NULL 333 val_333 +NULL NULL NULL NULL 333 val_333 +NULL NULL NULL NULL 333 val_333 +NULL NULL NULL NULL 333 val_333 +NULL NULL NULL NULL 333 val_333 +NULL NULL NULL NULL 335 val_335 +NULL NULL NULL NULL 336 val_336 +NULL NULL NULL NULL 338 val_338 +NULL NULL NULL NULL 339 val_339 +NULL NULL NULL NULL 34 val_34 +NULL NULL NULL NULL 341 val_341 +NULL NULL NULL NULL 342 val_342 +NULL NULL NULL NULL 342 val_342 +NULL NULL NULL NULL 342 val_342 +NULL NULL NULL NULL 342 val_342 +NULL NULL NULL NULL 342 val_342 +NULL NULL NULL NULL 342 val_342 
+NULL NULL NULL NULL 342 val_342 +NULL NULL NULL NULL 342 val_342 +NULL NULL NULL NULL 344 val_344 +NULL NULL NULL NULL 344 val_344 +NULL NULL NULL NULL 344 val_344 +NULL NULL NULL NULL 344 val_344 +NULL NULL NULL NULL 344 val_344 +NULL NULL NULL NULL 344 val_344 +NULL NULL NULL NULL 344 val_344 +NULL NULL NULL NULL 344 val_344 +NULL NULL NULL NULL 345 val_345 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 
348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 348 val_348 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 35 val_35 +NULL NULL NULL NULL 351 val_351 +NULL NULL NULL NULL 353 val_353 +NULL NULL NULL NULL 353 val_353 +NULL NULL NULL NULL 353 val_353 +NULL NULL NULL NULL 353 val_353 +NULL NULL NULL NULL 353 val_353 +NULL NULL NULL NULL 353 val_353 +NULL NULL NULL NULL 353 val_353 +NULL NULL NULL NULL 353 val_353 +NULL NULL NULL NULL 356 val_356 +NULL NULL NULL NULL 360 val_360 +NULL NULL NULL NULL 362 val_362 +NULL NULL NULL NULL 364 val_364 +NULL NULL NULL NULL 365 val_365 +NULL NULL NULL NULL 366 val_366 +NULL NULL NULL NULL 367 val_367 +NULL NULL NULL NULL 367 val_367 +NULL NULL NULL NULL 367 val_367 +NULL NULL NULL NULL 367 val_367 +NULL NULL NULL NULL 367 val_367 +NULL NULL NULL NULL 367 val_367 +NULL NULL NULL NULL 367 val_367 +NULL NULL NULL NULL 367 val_367 +NULL NULL NULL NULL 368 val_368 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 369 val_369 +NULL NULL NULL NULL 37 val_37 +NULL NULL NULL NULL 37 val_37 +NULL NULL NULL NULL 37 val_37 +NULL 
NULL NULL NULL 37 val_37 +NULL NULL NULL NULL 37 val_37 +NULL NULL NULL NULL 37 val_37 +NULL NULL NULL NULL 37 val_37 +NULL NULL NULL NULL 37 val_37 +NULL NULL NULL NULL 373 val_373 +NULL NULL NULL NULL 374 val_374 +NULL NULL NULL NULL 375 val_375 +NULL NULL NULL NULL 377 val_377 +NULL NULL NULL NULL 378 val_378 +NULL NULL NULL NULL 379 val_379 +NULL NULL NULL NULL 382 val_382 +NULL NULL NULL NULL 382 val_382 +NULL NULL NULL NULL 382 val_382 +NULL NULL NULL NULL 382 val_382 +NULL NULL NULL NULL 382 val_382 +NULL NULL NULL NULL 382 val_382 +NULL NULL NULL NULL 382 val_382 +NULL NULL NULL NULL 382 val_382 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 384 val_384 +NULL NULL NULL NULL 386 val_386 +NULL NULL NULL NULL 389 val_389 +NULL NULL NULL NULL 392 val_392 +NULL NULL NULL NULL 393 val_393 +NULL NULL NULL NULL 394 val_394 +NULL NULL NULL NULL 395 val_395 +NULL NULL NULL NULL 395 val_395 +NULL NULL NULL NULL 395 val_395 +NULL NULL NULL NULL 395 val_395 +NULL NULL NULL NULL 395 val_395 +NULL NULL NULL NULL 395 val_395 +NULL NULL NULL NULL 395 val_395 +NULL NULL NULL NULL 395 val_395 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 396 val_396 +NULL NULL NULL NULL 397 val_397 +NULL NULL NULL NULL 397 val_397 +NULL NULL NULL NULL 397 val_397 +NULL NULL NULL NULL 397 val_397 +NULL NULL NULL NULL 397 val_397 +NULL NULL NULL NULL 397 val_397 +NULL NULL NULL NULL 397 val_397 +NULL NULL NULL NULL 397 val_397 +NULL NULL NULL NULL 399 val_399 +NULL NULL NULL NULL 399 val_399 +NULL NULL NULL NULL 399 val_399 +NULL NULL NULL NULL 399 val_399 +NULL NULL NULL NULL 399 val_399 +NULL NULL NULL NULL 399 val_399 +NULL NULL NULL NULL 399 val_399 +NULL NULL NULL NULL 399 val_399 +NULL NULL NULL NULL 4 val_4 +NULL NULL NULL NULL 400 val_400 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL 
NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 
+NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 401 val_401 +NULL NULL NULL NULL 402 val_402 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 403 val_403 +NULL NULL NULL NULL 404 val_404 +NULL NULL NULL NULL 404 val_404 +NULL NULL NULL NULL 404 val_404 +NULL NULL NULL NULL 404 val_404 +NULL NULL NULL NULL 404 val_404 +NULL NULL NULL NULL 404 val_404 +NULL NULL NULL NULL 404 val_404 +NULL NULL NULL NULL 404 val_404 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 
406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 406 val_406 +NULL NULL NULL NULL 407 val_407 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 409 val_409 +NULL NULL NULL NULL 41 val_41 +NULL NULL NULL NULL 411 val_411 +NULL NULL NULL NULL 413 val_413 +NULL NULL NULL NULL 413 val_413 +NULL NULL NULL NULL 413 val_413 +NULL NULL NULL NULL 413 val_413 +NULL NULL NULL NULL 413 val_413 +NULL NULL NULL NULL 413 val_413 +NULL NULL NULL NULL 413 val_413 +NULL NULL NULL NULL 413 val_413 +NULL NULL NULL NULL 414 val_414 +NULL NULL NULL NULL 414 val_414 +NULL NULL NULL NULL 414 val_414 +NULL NULL NULL NULL 414 val_414 +NULL NULL NULL NULL 414 val_414 +NULL NULL NULL NULL 414 val_414 +NULL NULL NULL NULL 414 val_414 +NULL NULL NULL NULL 414 val_414 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 417 val_417 +NULL NULL NULL NULL 418 val_418 +NULL NULL NULL NULL 419 val_419 +NULL NULL NULL NULL 42 val_42 +NULL NULL NULL NULL 42 val_42 +NULL NULL NULL NULL 42 val_42 +NULL NULL NULL NULL 42 val_42 +NULL NULL NULL NULL 42 val_42 +NULL NULL NULL NULL 42 val_42 +NULL NULL NULL NULL 42 val_42 +NULL NULL NULL NULL 42 val_42 +NULL NULL NULL NULL 421 val_421 +NULL NULL NULL NULL 424 val_424 +NULL NULL NULL NULL 424 val_424 +NULL NULL NULL NULL 424 val_424 +NULL NULL NULL NULL 424 val_424 +NULL NULL NULL NULL 424 val_424 +NULL NULL NULL NULL 424 val_424 +NULL NULL NULL NULL 424 val_424 +NULL NULL NULL NULL 424 val_424 +NULL NULL NULL NULL 427 val_427 +NULL NULL NULL NULL 429 val_429 +NULL NULL NULL NULL 429 val_429 +NULL NULL NULL NULL 429 val_429 +NULL NULL NULL NULL 429 val_429 +NULL NULL NULL NULL 429 val_429 +NULL NULL NULL NULL 429 val_429 +NULL NULL NULL NULL 429 val_429 +NULL NULL NULL NULL 429 val_429 +NULL NULL NULL NULL 43 val_43 +NULL NULL NULL NULL 430 
val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 430 val_430 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 431 val_431 +NULL NULL NULL NULL 432 val_432 +NULL NULL NULL NULL 435 val_435 +NULL NULL NULL NULL 436 val_436 +NULL NULL NULL NULL 437 val_437 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 438 val_438 +NULL NULL NULL NULL 439 val_439 +NULL NULL NULL NULL 439 val_439 +NULL NULL NULL NULL 439 val_439 +NULL NULL NULL NULL 439 val_439 +NULL NULL NULL NULL 439 val_439 +NULL NULL NULL NULL 439 val_439 +NULL NULL NULL NULL 439 val_439 +NULL NULL NULL NULL 439 val_439 +NULL NULL NULL NULL 44 val_44 +NULL NULL NULL NULL 443 val_443 +NULL NULL NULL NULL 444 val_444 +NULL NULL NULL NULL 446 val_446 +NULL NULL NULL NULL 448 val_448 +NULL NULL NULL NULL 449 val_449 +NULL NULL NULL NULL 452 val_452 +NULL NULL NULL NULL 453 val_453 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL 
NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 454 val_454 +NULL NULL NULL NULL 455 val_455 +NULL NULL NULL NULL 457 val_457 +NULL NULL NULL NULL 458 val_458 +NULL NULL NULL NULL 458 val_458 +NULL NULL NULL NULL 458 val_458 +NULL NULL NULL NULL 458 val_458 +NULL NULL NULL NULL 458 val_458 +NULL NULL NULL NULL 458 val_458 +NULL NULL NULL NULL 458 val_458 +NULL NULL NULL NULL 458 val_458 +NULL NULL NULL NULL 459 val_459 +NULL NULL NULL NULL 459 val_459 +NULL NULL NULL NULL 459 val_459 +NULL NULL NULL NULL 459 val_459 +NULL NULL NULL NULL 459 val_459 +NULL NULL NULL NULL 459 val_459 +NULL NULL NULL NULL 459 val_459 +NULL NULL NULL NULL 459 val_459 +NULL NULL NULL NULL 460 val_460 +NULL NULL NULL NULL 462 val_462 +NULL NULL NULL NULL 462 val_462 +NULL NULL NULL NULL 462 val_462 +NULL NULL NULL NULL 462 val_462 +NULL NULL NULL NULL 462 val_462 +NULL NULL NULL NULL 462 val_462 +NULL NULL NULL NULL 462 val_462 +NULL NULL NULL NULL 462 val_462 +NULL NULL NULL NULL 463 val_463 +NULL NULL NULL NULL 463 val_463 +NULL NULL NULL NULL 463 val_463 +NULL NULL NULL NULL 463 val_463 +NULL NULL NULL NULL 463 val_463 +NULL NULL NULL NULL 463 val_463 +NULL NULL NULL NULL 463 val_463 +NULL NULL NULL NULL 463 val_463 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 466 val_466 +NULL NULL NULL NULL 467 val_467 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL 
NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 468 val_468 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 
val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 469 val_469 +NULL NULL NULL NULL 47 val_47 +NULL NULL NULL NULL 470 val_470 +NULL NULL NULL NULL 472 val_472 +NULL NULL NULL NULL 475 val_475 +NULL NULL NULL NULL 477 val_477 +NULL NULL NULL NULL 478 val_478 +NULL NULL NULL NULL 478 val_478 +NULL NULL NULL NULL 478 val_478 +NULL NULL NULL NULL 478 val_478 +NULL NULL NULL NULL 478 val_478 +NULL NULL NULL NULL 478 val_478 +NULL NULL NULL NULL 478 val_478 +NULL NULL NULL NULL 478 val_478 +NULL NULL NULL NULL 479 val_479 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 480 val_480 +NULL NULL NULL NULL 481 val_481 +NULL NULL NULL NULL 482 val_482 +NULL NULL NULL NULL 483 val_483 +NULL NULL NULL NULL 484 val_484 +NULL NULL NULL NULL 485 val_485 +NULL NULL NULL NULL 487 val_487 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL 
NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 489 val_489 +NULL NULL NULL NULL 490 val_490 +NULL NULL NULL NULL 491 val_491 +NULL NULL NULL NULL 492 val_492 +NULL NULL NULL NULL 492 val_492 +NULL NULL NULL NULL 492 val_492 +NULL NULL NULL NULL 492 val_492 +NULL NULL NULL NULL 492 val_492 +NULL NULL NULL NULL 492 val_492 +NULL NULL NULL NULL 492 val_492 +NULL NULL NULL NULL 492 val_492 +NULL NULL NULL NULL 493 val_493 +NULL NULL NULL NULL 494 val_494 +NULL NULL NULL NULL 495 val_495 +NULL NULL NULL NULL 496 val_496 +NULL NULL NULL NULL 497 val_497 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 498 val_498 +NULL NULL NULL NULL 5 val_5 +NULL NULL NULL NULL 5 val_5 +NULL NULL NULL NULL 5 val_5 +NULL NULL NULL NULL 51 val_51 +NULL NULL NULL NULL 51 val_51 +NULL NULL NULL NULL 51 val_51 +NULL NULL NULL NULL 51 
val_51 +NULL NULL NULL NULL 51 val_51 +NULL NULL NULL NULL 51 val_51 +NULL NULL NULL NULL 51 val_51 +NULL NULL NULL NULL 51 val_51 +NULL NULL NULL NULL 53 val_53 +NULL NULL NULL NULL 54 val_54 +NULL NULL NULL NULL 57 val_57 +NULL NULL NULL NULL 58 val_58 +NULL NULL NULL NULL 58 val_58 +NULL NULL NULL NULL 58 val_58 +NULL NULL NULL NULL 58 val_58 +NULL NULL NULL NULL 58 val_58 +NULL NULL NULL NULL 58 val_58 +NULL NULL NULL NULL 58 val_58 +NULL NULL NULL NULL 58 val_58 +NULL NULL NULL NULL 64 val_64 +NULL NULL NULL NULL 65 val_65 +NULL NULL NULL NULL 66 val_66 +NULL NULL NULL NULL 67 val_67 +NULL NULL NULL NULL 67 val_67 +NULL NULL NULL NULL 67 val_67 +NULL NULL NULL NULL 67 val_67 +NULL NULL NULL NULL 67 val_67 +NULL NULL NULL NULL 67 val_67 +NULL NULL NULL NULL 67 val_67 +NULL NULL NULL NULL 67 val_67 +NULL NULL NULL NULL 69 val_69 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 70 val_70 +NULL NULL NULL NULL 72 val_72 +NULL NULL NULL NULL 72 val_72 +NULL NULL NULL NULL 72 val_72 +NULL NULL NULL NULL 72 val_72 +NULL NULL NULL NULL 72 val_72 +NULL NULL NULL NULL 72 val_72 +NULL NULL NULL NULL 72 val_72 +NULL NULL NULL NULL 72 val_72 +NULL NULL NULL NULL 74 val_74 +NULL NULL NULL NULL 76 val_76 +NULL NULL NULL NULL 76 val_76 +NULL NULL NULL NULL 76 val_76 +NULL NULL NULL NULL 76 val_76 +NULL NULL NULL NULL 76 val_76 +NULL NULL NULL NULL 76 val_76 +NULL NULL NULL NULL 76 val_76 +NULL NULL NULL NULL 76 val_76 +NULL NULL NULL NULL 77 val_77 +NULL NULL NULL NULL 78 val_78 +NULL NULL NULL NULL 8 val_8 +NULL NULL NULL NULL 80 val_80 +NULL NULL NULL NULL 82 val_82 +NULL NULL NULL NULL 83 val_83 +NULL NULL NULL NULL 83 val_83 +NULL NULL NULL NULL 83 val_83 +NULL NULL NULL NULL 83 val_83 +NULL NULL NULL NULL 83 val_83 +NULL NULL NULL NULL 83 val_83 +NULL NULL NULL NULL 83 val_83 +NULL NULL NULL NULL 83 val_83 +NULL NULL NULL NULL 84 val_84 +NULL NULL NULL NULL 84 val_84 +NULL NULL NULL NULL 84 val_84 +NULL NULL NULL NULL 84 val_84 +NULL NULL NULL NULL 84 val_84 +NULL NULL NULL NULL 84 val_84 +NULL NULL NULL NULL 84 val_84 +NULL NULL NULL NULL 84 val_84 +NULL NULL NULL NULL 85 val_85 +NULL NULL NULL NULL 86 val_86 +NULL NULL NULL NULL 87 val_87 +NULL NULL NULL NULL 9 val_9 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL 
NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 90 val_90 +NULL NULL NULL NULL 92 val_92 +NULL NULL NULL NULL 95 val_95 +NULL NULL NULL NULL 95 val_95 +NULL NULL NULL NULL 95 val_95 +NULL NULL NULL NULL 95 val_95 +NULL NULL NULL NULL 95 val_95 +NULL NULL NULL NULL 95 val_95 +NULL NULL NULL NULL 95 val_95 +NULL NULL NULL NULL 95 val_95 +NULL NULL NULL NULL 96 val_96 +NULL NULL NULL NULL 97 val_97 +NULL NULL NULL NULL 97 val_97 +NULL NULL NULL NULL 97 val_97 +NULL NULL NULL NULL 97 val_97 +NULL NULL NULL NULL 97 val_97 +NULL NULL NULL NULL 97 val_97 +NULL NULL NULL NULL 97 val_97 +NULL NULL NULL NULL 97 val_97 +NULL NULL NULL NULL 98 val_98 +NULL NULL NULL NULL 98 val_98 +NULL NULL NULL NULL 98 val_98 +NULL NULL NULL NULL 98 val_98 +NULL NULL NULL NULL 98 val_98 +NULL NULL NULL NULL 98 val_98 +NULL NULL NULL NULL 98 val_98 +NULL NULL NULL NULL 98 val_98 Index: ql/src/test/results/clientpositive/auto_join8.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join8.q.out (revision 0) +++ ql/src/test/results/clientpositive/auto_join8.q.out (revision 0) @@ -0,0 +1,54 @@ +PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@dest1 +PREHOOK: query: FROM ( + FROM + ( + FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 + ) a + LEFT OUTER JOIN + ( + FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 + ) b + ON (a.c1 = b.c3) + SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 +) c +INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest1 +POSTHOOK: query: FROM ( + FROM + ( + FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 + ) a + LEFT OUTER JOIN + ( + FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 + ) b + ON (a.c1 = b.c3) + SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 +) c +INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dest1 +POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_14-00-11_196_7059792850585370742/-mr-10000 +POSTHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest1 +POSTHOOK: Output: 
file:/tmp/liyintang/hive_2010-11-11_14-00-11_196_7059792850585370742/-mr-10000 +POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +-7158439905 Index: ql/src/test/results/clientpositive/auto_join18.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join18.q.out (revision 0) +++ ql/src/test/results/clientpositive/auto_join18.q.out (revision 0) @@ -0,0 +1,31 @@ +PREHOOK: query: SELECT sum(hash(a.key, a.value, b.key, b.value)) + FROM + ( + SELECT src1.key as key, count(src1.value) AS value FROM src src1 group by src1.key + ) a + FULL OUTER JOIN + ( + SELECT src2.key as key, count(distinct(src2.value)) AS value + FROM src1 src2 group by src2.key + ) b + ON (a.key = b.key) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@src1 +PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-58-01_044_8197590748248491386/-mr-10000 +POSTHOOK: query: SELECT sum(hash(a.key, a.value, b.key, b.value)) + FROM + ( + SELECT src1.key as key, count(src1.value) AS value FROM src src1 group by src1.key + ) a + FULL OUTER JOIN + ( + SELECT src2.key as key, count(distinct(src2.value)) AS value + FROM src1 src2 group by src2.key + ) b + ON (a.key = b.key) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@src1 +POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-11_13-58-01_044_8197590748248491386/-mr-10000 +379685492277 Index: ql/src/test/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java (revision 1034599) +++ ql/src/test/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java (working copy) @@ -22,29 +22,39 @@ import java.util.Set; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.QTestUtil; +import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.security.UserGroupInformation; /** - * Implementation of a pre execute hook that prevents modifications - * of read-only tables used by the test framework + * Implementation of a pre execute hook that prevents modifications of read-only tables used by the + * test framework */ public class EnforceReadOnlyTables implements PreExecute { @Override - public void run(SessionState sess, Set inputs, - Set outputs, UserGroupInformation ugi) - throws Exception { + public void run(HookContext hookContext) throws Exception { + SessionState sessionState = SessionState.get(); + QueryPlan plan = hookContext.getQueryPlan(); + HiveConf conf = hookContext.getConf(); - for (WriteEntity w: outputs) { - if ((w.getTyp() == WriteEntity.Type.TABLE) || - (w.getTyp() == WriteEntity.Type.PARTITION)) { + this.run(sessionState, plan.getInputs(), plan.getOutputs(), ShimLoader.getHadoopShims() + .getUGIForConf(conf)); + } + + public void run(SessionState sess, Set inputs, Set outputs, + UserGroupInformation ugi) throws Exception { + + for (WriteEntity w : outputs) { + if 
((w.getTyp() == WriteEntity.Type.TABLE) || (w.getTyp() == WriteEntity.Type.PARTITION)) { Table t = w.getTable(); if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(t.getDbName()) && QTestUtil.srcTables.contains(t.getTableName())) { - throw new RuntimeException ("Cannot overwrite read-only table: " + t.getTableName()); + throw new RuntimeException("Cannot overwrite read-only table: " + t.getTableName()); } } } Index: ql/src/test/org/apache/hadoop/hive/ql/hooks/PostExecutePrinter.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/hooks/PostExecutePrinter.java (revision 1034599) +++ ql/src/test/org/apache/hadoop/hive/ql/hooks/PostExecutePrinter.java (working copy) @@ -25,13 +25,16 @@ import java.util.Map; import java.util.Set; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.hooks.LineageInfo.BaseColumnInfo; import org.apache.hadoop.hive.ql.hooks.LineageInfo.Dependency; import org.apache.hadoop.hive.ql.hooks.LineageInfo.DependencyKey; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; +import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.security.UserGroupInformation; /** @@ -93,7 +96,16 @@ } } - @Override + public void run(HookContext hookContext) throws Exception { + SessionState sessionState = SessionState.get(); + QueryPlan plan = hookContext.getQueryPlan(); + HiveConf conf = hookContext.getConf(); + LineageInfo linfo = (sessionState != null ? sessionState.getLineageState().getLineageInfo() : null); + + this.run(sessionState, plan.getInputs(), plan.getOutputs(),linfo, + ShimLoader.getHadoopShims().getUGIForConf(conf)); + } + public void run(SessionState sess, Set inputs, Set outputs, LineageInfo linfo, UserGroupInformation ugi) throws Exception { Index: ql/src/test/org/apache/hadoop/hive/ql/hooks/PreExecutePrinter.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/hooks/PreExecutePrinter.java (revision 1034599) +++ ql/src/test/org/apache/hadoop/hive/ql/hooks/PreExecutePrinter.java (working copy) @@ -23,21 +23,31 @@ import java.util.List; import java.util.Set; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; +import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.security.UserGroupInformation; /** - * Implementation of a pre execute hook that simply prints out its parameters to - * standard output. + * Implementation of a pre execute hook that simply prints out its parameters to standard output. 
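+ * With this patch the pre-execution entry point becomes run(HookContext); the older
+ * run(SessionState, inputs, outputs, ugi) signature is kept as an overload that the new
+ * method delegates to after pulling the query plan and conf out of the context. A
+ * minimal sketch of wiring the hook up for a test run, assuming the standard
+ * hive.exec.pre.hooks property:
+ *
+ *   set hive.exec.pre.hooks = org.apache.hadoop.hive.ql.hooks.PreExecutePrinter;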
*/ public class PreExecutePrinter implements PreExecute { @Override - public void run(SessionState sess, Set inputs, - Set outputs, UserGroupInformation ugi) - throws Exception { + public void run(HookContext hookContext) throws Exception { + SessionState sessionState = SessionState.get(); + QueryPlan plan = hookContext.getQueryPlan(); + HiveConf conf = hookContext.getConf(); + this.run(sessionState, plan.getInputs(), plan.getOutputs(), ShimLoader.getHadoopShims() + .getUGIForConf(conf)); + } + + public void run(SessionState sess, Set inputs, Set outputs, + UserGroupInformation ugi) throws Exception { + LogHelper console = SessionState.getConsole(); if (console == null) { Index: ql/src/test/queries/clientpositive/auto_join18_multi_distinct.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join18_multi_distinct.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join18_multi_distinct.q (revision 0) @@ -0,0 +1,14 @@ +set hive.auto.convert.join = false; + + SELECT sum(hash(a.key, a.value, b.key, b.value1, b.value2)) + FROM + ( + SELECT src1.key as key, count(src1.value) AS value FROM src src1 group by src1.key + ) a + FULL OUTER JOIN + ( + SELECT src2.key as key, count(distinct(src2.value)) AS value1, + count(distinct(src2.key)) AS value2 + FROM src1 src2 group by src2.key + ) b + ON (a.key = b.key); Index: ql/src/test/queries/clientpositive/auto_join10.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join10.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join10.q (revision 0) @@ -0,0 +1,10 @@ +set hive.auto.convert.join = false; + + +FROM +(SELECT src.* FROM src) x +JOIN +(SELECT src.* FROM src) Y +ON (x.key = Y.key) +select sum(hash(Y.key,Y.value)) + Index: ql/src/test/queries/clientpositive/auto_join11.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join11.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join11.q (revision 0) @@ -0,0 +1,9 @@ +set hive.auto.convert.join = false; + + +SELECT sum(hash(src1.c1, src2.c4)) +FROM +(SELECT src.key as c1, src.value as c2 from src) src1 +JOIN +(SELECT src.key as c3, src.value as c4 from src) src2 +ON src1.c1 = src2.c3 AND src1.c1 < 100; Index: ql/src/test/queries/clientpositive/auto_join20.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join20.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join20.q (revision 0) @@ -0,0 +1,16 @@ +set hive.auto.convert.join = false; + +select sum(hash(a.k1,a.v1,a.k2,a.v2,a.k3,a.v3)) +from ( +SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 , src3.key as k3, src3.value as v3 +FROM src src1 JOIN src src2 ON (src1.key = src2.key AND src1.key < 10) RIGHT OUTER JOIN src src3 ON (src1.key = src3.key AND src3.key < 20) +SORT BY k1,v1,k2,v2,k3,v3 +)a; + + +select sum(hash(a.k1,a.v1,a.k2,a.v2,a.k3,a.v3)) +from ( +SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 , src3.key as k3, src3.value as v3 +FROM src src1 JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key < 15) RIGHT OUTER JOIN src src3 ON (src1.key = src3.key AND src3.key < 20) +SORT BY k1,v1,k2,v2,k3,v3 +)a; Index: ql/src/test/queries/clientpositive/auto_join0.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join0.q (revision 0) +++ 
ql/src/test/queries/clientpositive/auto_join0.q (revision 0) @@ -0,0 +1,13 @@ + +set hive.auto.convert.join = true; + + +select sum(hash(a.k1,a.v1,a.k2, a.v2)) +from ( +SELECT src1.key as k1, src1.value as v1, + src2.key as k2, src2.value as v2 FROM + (SELECT * FROM src WHERE src.key < 10) src1 + JOIN + (SELECT * FROM src WHERE src.key < 10) src2 + SORT BY k1, v1, k2, v2 +) a; Index: ql/src/test/queries/clientpositive/auto_join12.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join12.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join12.q (revision 0) @@ -0,0 +1,15 @@ + + +set hive.auto.convert.join = false; + + + +SELECT sum(hash(src1.c1, src2.c4)) +FROM +(SELECT src.key as c1, src.value as c2 from src) src1 +JOIN +(SELECT src.key as c3, src.value as c4 from src) src2 +ON src1.c1 = src2.c3 AND src1.c1 < 100 +JOIN +(SELECT src.key as c5, src.value as c6 from src) src3 +ON src1.c1 = src3.c5 AND src3.c5 < 80; Index: ql/src/test/queries/clientpositive/auto_join21.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join21.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join21.q (revision 0) @@ -0,0 +1,4 @@ +set hive.auto.convert.join = true; + + +SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value; Index: ql/src/test/queries/clientpositive/auto_join1.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join1.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join1.q (revision 0) @@ -0,0 +1,8 @@ +set hive.auto.convert.join =true; + +CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE; + +FROM src src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value; + +SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1; \ No newline at end of file Index: ql/src/test/queries/clientpositive/auto_join13.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join13.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join13.q (revision 0) @@ -0,0 +1,13 @@ + +set hive.auto.convert.join = false; + + +SELECT sum(hash(src1.c1, src2.c4)) +FROM +(SELECT src.key as c1, src.value as c2 from src) src1 +JOIN +(SELECT src.key as c3, src.value as c4 from src) src2 +ON src1.c1 = src2.c3 AND src1.c1 < 100 +JOIN +(SELECT src.key as c5, src.value as c6 from src) src3 +ON src1.c1 + src2.c3 = src3.c5 AND src3.c5 < 200; Index: ql/src/test/queries/clientpositive/auto_join22.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join22.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join22.q (revision 0) @@ -0,0 +1,3 @@ +set hive.auto.convert.join = false; + +SELECT sum(hash(src5.src1_value)) FROM (SELECT src3.*, src4.value as src4_value, src4.key as src4_key FROM src src4 JOIN (SELECT src2.*, src1.key as src1_key, src1.value as src1_value FROM src src1 JOIN src src2 ON src1.key = src2.key) src3 ON src3.src1_key = src4.key) src5; Index: ql/src/test/queries/clientpositive/auto_join2.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join2.q (revision 0) +++ 
ql/src/test/queries/clientpositive/auto_join2.q (revision 0) @@ -0,0 +1,8 @@ +set hive.auto.convert.join = false; + +CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE; + +FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) +INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value; + +SELECT sum(hash(dest_j2.key,dest_j2.value)) FROM dest_j2; Index: ql/src/test/queries/clientpositive/auto_join14.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join14.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join14.q (revision 0) @@ -0,0 +1,13 @@ + +set hive.auto.convert.join = false; + +CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE; + +set mapred.job.tracker=does.notexist.com:666; +set hive.exec.mode.local.auto=true; + + +FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 +INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value; + +SELECT sum(hash(dest1.c1,dest1.c2)) FROM dest1; Index: ql/src/test/queries/clientpositive/auto_join23.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join23.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join23.q (revision 0) @@ -0,0 +1,4 @@ +set hive.auto.convert.join = false; + + +SELECT * FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 SORT BY src1.key, src1.value, src2.key, src2.value; Index: ql/src/test/queries/clientpositive/auto_join3.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join3.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join3.q (revision 0) @@ -0,0 +1,9 @@ +set hive.auto.convert.join = true; + +CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; + + +FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key) +INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value; + +SELECT sum(hash(dest1.key,dest1.value)) FROM dest1; \ No newline at end of file Index: ql/src/test/queries/clientpositive/auto_join15.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join15.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join15.q (revision 0) @@ -0,0 +1,11 @@ + +set hive.auto.convert.join = false; + + +select sum(hash(a.k1,a.v1,a.k2, a.v2)) +from ( +SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 +FROM src src1 JOIN src src2 ON (src1.key = src2.key) +SORT BY k1, v1, k2, v2 +) a; + Index: ql/src/test/queries/clientpositive/auto_join24.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join24.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join24.q (revision 0) @@ -0,0 +1,10 @@ +set hive.auto.convert.join = false; + +create table tst1(key STRING, cnt INT); + +INSERT OVERWRITE TABLE tst1 +SELECT a.key, count(1) FROM src a group by a.key; + +SELECT sum(a.cnt) FROM tst1 a JOIN tst1 b ON a.key = b.key; + + Index: ql/src/test/queries/clientpositive/auto_join4.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join4.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join4.q (revision 0) @@ -0,0 +1,20 @@ +set hive.auto.convert.join = false; + +CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; + + +FROM 
( + FROM + ( + FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 + ) a + LEFT OUTER JOIN + ( + FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 + ) b + ON (a.c1 = b.c3) + SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 +) c +INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4; + +SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1; Index: ql/src/test/queries/clientpositive/auto_join16.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join16.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join16.q (revision 0) @@ -0,0 +1,12 @@ + +set hive.auto.convert.join = false; + + + + +SELECT sum(hash(subq.key, tab.value)) +FROM +(select a.key, a.value from src a where a.key > 10 ) subq +JOIN src tab +ON (subq.key = tab.key and subq.key > 20 and subq.value = tab.value) +where tab.value < 200; \ No newline at end of file Index: ql/src/test/queries/clientpositive/auto_join25.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join25.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join25.q (revision 0) @@ -0,0 +1,28 @@ +set hive.auto.convert.join = true; +set hive.hashtable.max.memory.usage = 0.0001; +set hive.hashtable.scale = 2; + +CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; + +FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11'); + +SELECT sum(hash(dest1.key,dest1.value)) FROM dest1; + + + +CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE; + +FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) +INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value; + +SELECT sum(hash(dest_j2.key,dest_j2.value)) FROM dest_j2; + +CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE; + +FROM src src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value; + +SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1; + Index: ql/src/test/queries/clientpositive/auto_join5.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join5.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join5.q (revision 0) @@ -0,0 +1,19 @@ +set hive.auto.convert.join = false; + +CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; + +FROM ( + FROM + ( + FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 + ) a + RIGHT OUTER JOIN + ( + FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 + ) b + ON (a.c1 = b.c3) + SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 +) c +INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4; + +SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1; Index: ql/src/test/queries/clientpositive/auto_join17.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join17.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join17.q (revision 0) @@ -0,0 +1,10 @@ + +set hive.auto.convert.join = false; + +CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE; + + +FROM src src1 JOIN src src2 ON 
(src1.key = src2.key) +INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*; + +SELECT sum(hash(dest1.key1,dest1.value1,dest1.key2,dest1.value2)) FROM dest1; \ No newline at end of file Index: ql/src/test/queries/clientpositive/auto_join_nulls.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join_nulls.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join_nulls.q (revision 0) @@ -0,0 +1,29 @@ +set hive.auto.convert.join = true; + +CREATE TABLE myinput1(key int, value int); +LOAD DATA LOCAL INPATH '../data/files/in1.txt' INTO TABLE myinput1; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value; + Index: ql/src/test/queries/clientpositive/auto_join6.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join6.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join6.q (revision 0) @@ -0,0 +1,21 @@ +set hive.auto.convert.join = false; + +CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; + 
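+-- Note on the flag above: a FULL OUTER JOIN cannot be run as a map-side join, since
+-- neither input can be hashed entirely in memory while still emitting unmatched rows
+-- from both sides, so this test pins hive.auto.convert.join = false and exercises the
+-- plain reduce-side join. By contrast, an inner join such as the one in auto_join1.q
+-- is a candidate for automatic conversion whenever one side is smaller than
+-- hive.smalltable.filesize.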
+ +FROM ( + FROM + ( + FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 + ) a + FULL OUTER JOIN + ( + FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 + ) b + ON (a.c1 = b.c3) + SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 +) c +INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4; + + +SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1; Index: ql/src/test/queries/clientpositive/auto_join18.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join18.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join18.q (revision 0) @@ -0,0 +1,14 @@ + +set hive.auto.convert.join = false; + + SELECT sum(hash(a.key, a.value, b.key, b.value)) + FROM + ( + SELECT src1.key as key, count(src1.value) AS value FROM src src1 group by src1.key + ) a + FULL OUTER JOIN + ( + SELECT src2.key as key, count(distinct(src2.value)) AS value + FROM src1 src2 group by src2.key + ) b + ON (a.key = b.key); Index: ql/src/test/queries/clientpositive/auto_join7.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join7.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join7.q (revision 0) @@ -0,0 +1,25 @@ +set hive.auto.convert.join = false; + +CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING, c5 INT, c6 STRING) STORED AS TEXTFILE; + +FROM ( + FROM + ( + FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 + ) a + FULL OUTER JOIN + ( + FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 + ) b + ON (a.c1 = b.c3) + LEFT OUTER JOIN + ( + FROM src src3 SELECT src3.key AS c5, src3.value AS c6 WHERE src3.key > 20 and src3.key < 25 + ) c + ON (a.c1 = c.c5) + SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4, c.c5 AS c5, c.c6 AS c6 +) c +INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6; + + +SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4,dest1.c5,dest1.c6)) FROM dest1; Index: ql/src/test/queries/clientpositive/auto_join19.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join19.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join19.q (revision 0) @@ -0,0 +1,11 @@ +set hive.auto.convert.join = false; + +CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; + + +FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11'); + + +SELECT sum(hash(dest1.key,dest1.value)) FROM dest1; Index: ql/src/test/queries/clientpositive/auto_join8.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join8.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join8.q (revision 0) @@ -0,0 +1,19 @@ +set hive.auto.convert.join = false; + +CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; + +FROM ( + FROM + ( + FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 + ) a + LEFT OUTER JOIN + ( + FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 + ) b + ON (a.c1 = b.c3) + SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 +) c +INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 where 
c.c3 IS NULL AND c.c1 IS NOT NULL; + +SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1; Index: ql/src/test/queries/clientpositive/auto_join9.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join9.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join9.q (revision 0) @@ -0,0 +1,10 @@ +set hive.auto.convert.join = false; + +CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; + +FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) +INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'; + + + +SELECT sum(hash(dest1.key,dest1.value)) FROM dest1; Index: ql/src/test/queries/clientpositive/auto_join_filters.q =================================================================== --- ql/src/test/queries/clientpositive/auto_join_filters.q (revision 0) +++ ql/src/test/queries/clientpositive/auto_join_filters.q (revision 0) @@ -0,0 +1,82 @@ +set hive.auto.convert.join = true; + +CREATE TABLE myinput1(key int, value int); +LOAD DATA LOCAL INPATH '../data/files/in3.txt' INTO TABLE myinput1; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND 
a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = 
c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; + + +CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; +LOAD DATA LOCAL INPATH '../data/files/in1.txt' into table smb_input1; +LOAD DATA LOCAL INPATH '../data/files/in2.txt' into table smb_input1; +LOAD DATA LOCAL INPATH '../data/files/in1.txt' into table smb_input2; +LOAD DATA LOCAL INPATH '../data/files/in2.txt' into table smb_input2; + +SET hive.optimize.bucketmapjoin = true; +SET hive.optimize.bucketmapjoin.sortedmerge = true; +SET hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; + +SET hive.outerjoin.supports.filters = false; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = 
b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); 
+SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; Index: ql/src/java/org/apache/hadoop/hive/ql/DriverContext.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/DriverContext.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/DriverContext.java (working copy) @@ -19,11 +19,12 @@ package org.apache.hadoop.hive.ql; import java.io.Serializable; +import java.util.HashMap; import java.util.LinkedList; +import java.util.List; import java.util.Queue; import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.mapred.JobConf; /** * DriverContext. @@ -32,7 +33,10 @@ public class DriverContext { Queue> runnable = new LinkedList>(); + HashMap, Task> backupTaskMap = new HashMap, Task>(); + HashMap, List>> backupChildTaskMap = new HashMap, List>>(); + // how many jobs have been started int curJobNo; @@ -82,5 +86,33 @@ public void incCurJobNo(int amount) { this.curJobNo = this.curJobNo + amount; } - + + public void addBackUpTaskMapping(Task task, + Task backupTask) { + this.backupTaskMap.put(task, backupTask); + } + + public Task getBackupTask(Task task) { + Task backupTask = this.backupTaskMap.get(task); + if (backupTask != null) { + List> backupChildrenTasks = this.backupChildTaskMap.get(backupTask); + Task oldParentTask = null; + if(task.isMapRedLocalTask()){ + oldParentTask = task.getChildTasks().get(0); + }else{ + oldParentTask = task; + } + for(Task childTsk : backupChildrenTasks) { + childTsk.getParentTasks().remove(oldParentTask); + childTsk.getParentTasks().add(backupTask); + } + + } + return backupTask; + } + + public void addBackUpChildrenTasks(Task task, + List> childrenTasks) { + this.backupChildTaskMap.put(task, childrenTasks); + } } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkDeDuplication.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkDeDuplication.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkDeDuplication.java (working copy) @@ -205,7 +205,7 @@ Operator input = parentOp.get(0); input.getChildOperators().clear(); - RowResolver inputRR = pGraphContext.getOpParseCtx().get(input).getRR(); + RowResolver inputRR = pGraphContext.getOpParseCtx().get(input).getRowResolver(); ArrayList exprs = new ArrayList(); ArrayList outputs = new ArrayList(); Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java (working copy) @@ -179,7 +179,7 @@ cppCtx.getPrunedColLists().put((Operator) nd, cols); ArrayList needed_columns = new ArrayList(); - RowResolver inputRR = cppCtx.getOpToParseCtxMap().get(scanOp).getRR(); + RowResolver inputRR = cppCtx.getOpToParseCtxMap().get(scanOp).getRowResolver(); TableScanDesc desc = scanOp.getConf(); List virtualCols = desc.getVirtualCols(); List newVirtualCols = new ArrayList(); @@ -232,7 +232,7 @@ ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx; HashMap, OpParseContext> opToParseCtxMap = cppCtx 
.getOpToParseCtxMap(); - RowResolver redSinkRR = opToParseCtxMap.get(op).getRR(); + RowResolver redSinkRR = opToParseCtxMap.get(op).getRowResolver(); ReduceSinkDesc conf = op.getConf(); List> childOperators = op .getChildOperators(); @@ -250,7 +250,7 @@ assert parentOperators.size() == 1; Operator par = parentOperators.get(0); JoinOperator childJoin = (JoinOperator) childOperators.get(0); - RowResolver parRR = opToParseCtxMap.get(par).getRR(); + RowResolver parRR = opToParseCtxMap.get(par).getRowResolver(); List childJoinCols = cppCtx.getJoinPrunedColLists().get( childJoin).get((byte) conf.getTag()); boolean[] flags = new boolean[conf.getValueCols().size()]; @@ -383,7 +383,7 @@ ArrayList newOutputColumnNames = new ArrayList(); ArrayList rs_oldsignature = op.getSchema().getSignature(); ArrayList rs_newsignature = new ArrayList(); - RowResolver old_rr = cppCtx.getOpToParseCtxMap().get(op).getRR(); + RowResolver old_rr = cppCtx.getOpToParseCtxMap().get(op).getRowResolver(); RowResolver new_rr = new RowResolver(); for (String col : cols) { int index = originalOutputColumnNames.indexOf(col); @@ -394,7 +394,7 @@ ColumnInfo columnInfo = old_rr.get(tabcol[0], tabcol[1]); new_rr.put(tabcol[0], tabcol[1], columnInfo); } - cppCtx.getOpToParseCtxMap().get(op).setRR(new_rr); + cppCtx.getOpToParseCtxMap().get(op).setRowResolver(new_rr); op.getSchema().setSignature(rs_newsignature); conf.setColList(newColList); conf.setOutputColumnNames(newOutputColumnNames); @@ -465,7 +465,7 @@ Map oldMap = reduce.getColumnExprMap(); Map newMap = new HashMap(); ArrayList sig = new ArrayList(); - RowResolver oldRR = cppCtx.getOpToParseCtxMap().get(reduce).getRR(); + RowResolver oldRR = cppCtx.getOpToParseCtxMap().get(reduce).getRowResolver(); RowResolver newRR = new RowResolver(); ArrayList originalValueOutputColNames = reduceConf .getOutputValueColumnNames(); @@ -493,7 +493,7 @@ ArrayList keyCols = reduceConf.getKeyCols(); List keys = new ArrayList(); RowResolver parResover = cppCtx.getOpToParseCtxMap().get( - reduce.getParentOperators().get(0)).getRR(); + reduce.getParentOperators().get(0)).getRowResolver(); for (int i = 0; i < keyCols.size(); i++) { keys = Utilities.mergeUniqElems(keys, keyCols.get(i).getCols()); } @@ -506,7 +506,7 @@ } } - cppCtx.getOpToParseCtxMap().get(reduce).setRR(newRR); + cppCtx.getOpToParseCtxMap().get(reduce).setRowResolver(newRR); reduce.setColumnExprMap(newMap); reduce.getSchema().setSignature(sig); reduceConf.setOutputValueColumnNames(newOutputColNames); @@ -614,7 +614,7 @@ } } - RowResolver joinRR = cppCtx.getOpToParseCtxMap().get(op).getRR(); + RowResolver joinRR = cppCtx.getOpToParseCtxMap().get(op).getRowResolver(); RowResolver newJoinRR = new RowResolver(); ArrayList outputCols = new ArrayList(); ArrayList rs = new ArrayList(); @@ -699,7 +699,7 @@ op.setColumnExprMap(newColExprMap); conf.setOutputColumnNames(outputCols); op.getSchema().setSignature(rs); - cppCtx.getOpToParseCtxMap().get(op).setRR(newJoinRR); + cppCtx.getOpToParseCtxMap().get(op).setRowResolver(newJoinRR); cppCtx.getJoinPrunedColLists().put(op, prunedColLists); } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java (working copy) @@ -217,7 +217,7 @@ // Add the extract operator to get the value fields RowResolver out_rwsch = new RowResolver(); - RowResolver interim_rwsch 
= ctx.getParseCtx().getOpParseCtx().get(fsOp).getRR(); + RowResolver interim_rwsch = ctx.getParseCtx().getOpParseCtx().get(fsOp).getRowResolver(); Integer pos = Integer.valueOf(0); for (ColumnInfo colInfo : interim_rwsch.getColumnInfos()) { String[] info = interim_rwsch.reverseLookup(colInfo.getInternalName()); Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java (working copy) @@ -858,8 +858,9 @@ // create a dummy tableScan operator on top of op // TableScanOperator is implicitly created here for each MapOperator + RowResolver rowResolver = opProcCtx.getParseCtx().getOpParseCtx().get(parent).getRowResolver(); Operator ts_op = putOpInsertMap(OperatorFactory - .get(TableScanDesc.class, parent.getSchema()), null, parseCtx); + .get(TableScanDesc.class, parent.getSchema()), rowResolver, parseCtx); childOpList = new ArrayList>(); childOpList.add(op); Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MapJoinResolver.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MapJoinResolver.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MapJoinResolver.java (working copy) @@ -42,11 +42,16 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.lib.TaskGraphWalker; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.ConditionalResolver; +import org.apache.hadoop.hive.ql.plan.ConditionalResolverCommonJoin; +import org.apache.hadoop.hive.ql.plan.ConditionalResolverSkewJoin; import org.apache.hadoop.hive.ql.plan.ConditionalWork; import org.apache.hadoop.hive.ql.plan.MapredLocalWork; import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.ConditionalResolverCommonJoin.ConditionalResolverCommonJoinCtx; import org.apache.hadoop.hive.ql.plan.ConditionalResolverSkewJoin.ConditionalResolverSkewJoinCtx; /** @@ -60,7 +65,7 @@ //create dispatcher and graph walker Dispatcher disp = new LocalMapJoinTaskDispatcher(pctx); - GraphWalker ogw = new DefaultGraphWalker(disp); + TaskGraphWalker ogw = new TaskGraphWalker(disp); //get all the tasks nodes from root task ArrayList topNodes = new ArrayList(); @@ -149,31 +154,64 @@ listWork.set(index,(Serializable)localwork); conditionalWork.setListWorks(listWork); - //get bigKeysDirToTaskMap - ConditionalResolverSkewJoinCtx context = - (ConditionalResolverSkewJoinCtx) conditionalTask.getResolverCtx(); - HashMap> bigKeysDirToTaskMap = - context.getDirToTaskMap(); + ConditionalResolver resolver = conditionalTask.getResolver(); + if(resolver instanceof ConditionalResolverSkewJoin){ + //get bigKeysDirToTaskMap + ConditionalResolverSkewJoinCtx context = + (ConditionalResolverSkewJoinCtx) conditionalTask.getResolverCtx(); + HashMap> bigKeysDirToTaskMap = + context.getDirToTaskMap(); - //to avoid concurrent modify the hashmap - HashMap> newbigKeysDirToTaskMap = - new HashMap>(); + //to avoid concurrent modify the hashmap + HashMap> newbigKeysDirToTaskMap = + new HashMap>(); - //reset the resolver - for(Map.Entry> entry: bigKeysDirToTaskMap.entrySet()){ - 
Task task = entry.getValue(); - String key = entry.getKey(); + //reset the resolver + for(Map.Entry> entry: bigKeysDirToTaskMap.entrySet()){ + Task task = entry.getValue(); + String key = entry.getKey(); + if(task.equals(currTask)){ + newbigKeysDirToTaskMap.put(key, localTask); + }else{ + newbigKeysDirToTaskMap.put(key, task); + } + } - if(task.equals(currTask)){ - newbigKeysDirToTaskMap.put(key, localTask); - }else{ - newbigKeysDirToTaskMap.put(key, task); + context.setDirToTaskMap(newbigKeysDirToTaskMap); + conditionalTask.setResolverCtx(context); + + }else if(resolver instanceof ConditionalResolverCommonJoin){ + //get bigKeysDirToTaskMap + ConditionalResolverCommonJoinCtx context = + (ConditionalResolverCommonJoinCtx) conditionalTask.getResolverCtx(); + HashMap> aliasToWork = + context.getAliasToTask(); + + //to avoid concurrent modify the hashmap + HashMap> newAliasToWork = + new HashMap>(); + + //reset the resolver + for(Map.Entry> entry: aliasToWork.entrySet()){ + Task task = entry.getValue(); + String key = entry.getKey(); + + if(task.equals(currTask)){ + newAliasToWork.put(key, localTask); + }else{ + newAliasToWork.put(key, task); + } } + + context.setAliasToTask(newAliasToWork); + conditionalTask.setResolverCtx(context); + + }else{ + } - context.setDirToTaskMap(newbigKeysDirToTaskMap); - conditionalTask.setResolverCtx(context); + } } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java (working copy) @@ -49,6 +49,9 @@ if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVESKEWJOIN)) { resolvers.add(new SkewJoinResolver()); } + if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN)) { + resolvers.add(new CommonJoinResolver()); + } resolvers.add(new MapJoinResolver()); } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java (working copy) @@ -273,7 +273,7 @@ JoinOperator cloneJoinOp = (JoinOperator) reducer; MapJoinDesc mapJoinDescriptor = new MapJoinDesc(newJoinKeys, keyTblDesc, - newJoinValues, newJoinValueTblDesc, joinDescriptor + newJoinValues, newJoinValueTblDesc, newJoinValueTblDesc,joinDescriptor .getOutputColumnNames(), i, joinDescriptor.getConds(), joinDescriptor.getFilters(), joinDescriptor.getNoOuterJoin()); mapJoinDescriptor.setTagOrder(tags); Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinResolver.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinResolver.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinResolver.java (revision 0) @@ -0,0 +1,218 @@ +package org.apache.hadoop.hive.ql.optimizer.physical; + +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Stack; + +import org.apache.hadoop.hive.ql.exec.ConditionalTask; +import 
org.apache.hadoop.hive.ql.exec.JoinOperator; +import org.apache.hadoop.hive.ql.exec.MapRedTask; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.lib.Dispatcher; +import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.lib.TaskGraphWalker; +import org.apache.hadoop.hive.ql.lib.TaskGraphWalker.TaskGraphWalkerContext; +import org.apache.hadoop.hive.ql.optimizer.MapJoinProcessor; +import org.apache.hadoop.hive.ql.parse.ParseContext; +import org.apache.hadoop.hive.ql.parse.QBJoinTree; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.ConditionalResolverCommonJoin; +import org.apache.hadoop.hive.ql.plan.ConditionalWork; +import org.apache.hadoop.hive.ql.plan.JoinDesc; +import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.ConditionalResolverCommonJoin.ConditionalResolverCommonJoinCtx; + + +public class CommonJoinResolver implements PhysicalPlanResolver { + @Override + public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { + + // create dispatcher and graph walker + Dispatcher disp = new CommonJoinTaskDispatcher(pctx); + TaskGraphWalker ogw = new TaskGraphWalker(disp); + + // get all the task nodes from the root tasks + ArrayList topNodes = new ArrayList(); + topNodes.addAll(pctx.rootTasks); + + // begin to walk through the task tree. + ogw.startWalking(topNodes, null); + return pctx; + } + + + /** + * Iterate over the tasks. If a task's reducer is a common join operator that can be converted, + * wrap the task in a conditional task: one candidate map join task per possible big table, plus + * the original common join task as the backup to run when no candidate qualifies at runtime. + */ + class CommonJoinTaskDispatcher implements Dispatcher { + + private final PhysicalContext physicalContext; + + public CommonJoinTaskDispatcher(PhysicalContext context) { + super(); + physicalContext = context; + } + + private ConditionalTask processCurrentTask(MapRedTask currTask, ConditionalTask conditionalTask) + throws SemanticException { + + // check whether this task's reducer is a common join operator; if so, return it + JoinOperator joinOp = getJoinOp(currTask); + if (joinOp == null) { + return null; + } + MapredWork currWork = currTask.getWork(); + // create conditional work list and task list + List listWorks = new ArrayList(); + List> listTasks = new ArrayList>(); + + // create alias to task mapping and alias to input file mapping for resolver + HashMap> aliasToTask = new HashMap>(); + HashMap aliasToPath = new HashMap(); + HashMap> pathToAliases = currTask.getWork().getPathToAliases(); + + // get parseCtx for this Join Operator + ParseContext parseCtx = physicalContext.getParseContext(); + QBJoinTree joinTree = parseCtx.getJoinContext().get(joinOp); + + // start to generate multiple map join tasks + JoinDesc joinDesc = joinOp.getConf(); + Byte[] order = joinDesc.getTagOrder(); + int numAliases = order.length; + + try { + HashSet smallTableOnlySet = MapJoinProcessor.getSmallTableOnlySet(joinDesc + .getConds()); + // no table could be the big table; there is no need to convert + if (smallTableOnlySet == null) { + return null; + } + currWork.setOpParseCtxMap(parseCtx.getOpParseCtx()); + currWork.setJoinTree(joinTree); + + String xml = currWork.toXML(); + String bigTableAlias = null; + + for (int i = 0; i < numAliases; i++) { + // this table 
cannot be big table + if (smallTableOnlySet.contains(i)) { + continue; + } + + // create map join task and set big table as i + // deep copy a new mapred work from xml + InputStream in = new ByteArrayInputStream(xml.getBytes("UTF-8")); + MapredWork newWork = Utilities.deserializeMapRedWork(in, physicalContext.getConf()); + // create a mapred task for this work + MapRedTask newTask = (MapRedTask) TaskFactory.get(newWork, physicalContext + .getParseContext().getConf()); + JoinOperator newJoinOp = getJoinOp(newTask); + + // optimize this newWork and assume big table position is i + bigTableAlias = MapJoinProcessor.genMapJoinOpAndLocalWork(newWork, newJoinOp, i); + + // add into conditional task + listWorks.add(newWork); + listTasks.add(newTask); + + // put the mapping alias to task + aliasToTask.put(bigTableAlias, newTask); + + // set alias to path + for (Map.Entry> entry : pathToAliases.entrySet()) { + String path = entry.getKey(); + ArrayList aliasList = entry.getValue(); + if (aliasList.contains(bigTableAlias)) { + aliasToPath.put(bigTableAlias, path); + } + } + + } + + } catch (Exception e) { + e.printStackTrace(); + throw new SemanticException("Generate Map Join Task Error: " + e.getMessage()); + } + + // insert current common join task to conditional task + listWorks.add(currTask.getWork()); + listTasks.add(currTask); + // clear JoinTree and OP Parse Context + currWork.setOpParseCtxMap(null); + currWork.setJoinTree(null); + + // create conditional task and insert conditional task into task tree + ConditionalWork cndWork = new ConditionalWork(listWorks); + ConditionalTask cndTsk = (ConditionalTask) TaskFactory.get(cndWork, parseCtx.getConf()); + cndTsk.setListTasks(listTasks); + + // set resolver and resolver context + cndTsk.setResolver(new ConditionalResolverCommonJoin()); + ConditionalResolverCommonJoinCtx resolverCtx = new ConditionalResolverCommonJoinCtx(); + resolverCtx.setAliasToPath(aliasToPath); + resolverCtx.setAliasToTask(aliasToTask); + resolverCtx.setCommonJoinTask(currTask); + cndTsk.setResolverCtx(resolverCtx); + + ((Task) currTask).replaceWithConditionalTask(cndTsk, physicalContext); + return cndTsk; + } + + + @Override + public Object dispatch(Node nd, Stack stack, Object... 
nodeOutputs) + throws SemanticException { + if (nodeOutputs == null || nodeOutputs.length == 0) { + throw new SemanticException("No Dispatch Context"); + } + + TaskGraphWalkerContext walkerCtx = (TaskGraphWalkerContext) nodeOutputs[0]; + + Task currTask = (Task) nd; + // not map reduce task or not conditional task, just skip + if (currTask.isMapRedTask()) { + if (currTask instanceof ConditionalTask) { + // get the list of task + List> taskList = ((ConditionalTask) currTask).getListTasks(); + for (Task tsk : taskList) { + if (tsk.isMapRedTask()) { + ConditionalTask cndTask = this.processCurrentTask((MapRedTask) tsk, + ((ConditionalTask) currTask)); + walkerCtx.addToDispatchList(cndTask); + } + } + } else { + ConditionalTask cndTask = this.processCurrentTask((MapRedTask) currTask, null); + walkerCtx.addToDispatchList(cndTask); + } + } + return null; + } + + + private JoinOperator getJoinOp(MapRedTask task) throws SemanticException { + if (task.getWork() == null) { + return null; + } + + Operator reducerOp = task.getWork().getReducer(); + if (reducerOp instanceof JoinOperator) { + return (JoinOperator) reducerOp; + } else { + return null; + } + + } + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java (working copy) @@ -21,6 +21,7 @@ import java.io.Serializable; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedList; @@ -44,6 +45,7 @@ import org.apache.hadoop.hive.ql.exec.RowSchema; import org.apache.hadoop.hive.ql.exec.ScriptOperator; import org.apache.hadoop.hive.ql.exec.SelectOperator; +import org.apache.hadoop.hive.ql.exec.TableScanOperator; import org.apache.hadoop.hive.ql.exec.UnionOperator; import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher; import org.apache.hadoop.hive.ql.lib.Dispatcher; @@ -53,7 +55,6 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; -import org.apache.hadoop.hive.ql.parse.ASTNode; import org.apache.hadoop.hive.ql.parse.ErrorMsg; import org.apache.hadoop.hive.ql.parse.GenMapRedWalker; import org.apache.hadoop.hive.ql.parse.OpParseContext; @@ -61,23 +62,28 @@ import org.apache.hadoop.hive.ql.parse.QBJoinTree; import org.apache.hadoop.hive.ql.parse.RowResolver; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.parse.TypeCheckCtx; -import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.plan.FetchWork; +import org.apache.hadoop.hive.ql.plan.JoinCondDesc; import org.apache.hadoop.hive.ql.plan.JoinDesc; import org.apache.hadoop.hive.ql.plan.MapJoinDesc; +import org.apache.hadoop.hive.ql.plan.MapredLocalWork; +import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.PartitionDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; import org.apache.hadoop.hive.ql.plan.SelectDesc; import org.apache.hadoop.hive.ql.plan.TableDesc; +import 
org.apache.hadoop.hive.serde.Constants; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; /** - * Implementation of one of the rule-based map join optimization. User passes - * hints to specify map-joins and during this optimization, all user specified - * map joins are converted to MapJoins - the reduce sink operator above the join - * are converted to map sink operators. In future, once statistics are - * implemented, this transformation can also be done based on costs. + * Implementation of one of the rule-based map join optimization. User passes hints to specify + * map-joins and during this optimization, all user specified map joins are converted to MapJoins - + * the reduce sink operator above the join are converted to map sink operators. In future, once + * statistics are implemented, this transformation can also be done based on costs. */ public class MapJoinProcessor implements Transform { @@ -93,13 +99,141 @@ } @SuppressWarnings("nls") - private Operator putOpInsertMap( - Operator op, RowResolver rr) { + private Operator putOpInsertMap(Operator op, + RowResolver rr) { OpParseContext ctx = new OpParseContext(rr); pGraphContext.getOpParseCtx().put(op, ctx); return op; } + public static String genMapJoinLocalWork(MapredWork newWork, MapJoinOperator mapJoinOp, + int bigTablePos) throws SemanticException { + // keep the small table alias to avoid concurrent modification exception + ArrayList smallTableAliasList = new ArrayList(); + String bigTableAlias = null; + + // generate MapredLocalWork + MapredLocalWork newLocalWork = new MapredLocalWork( + new LinkedHashMap>(), + new LinkedHashMap()); + for (Map.Entry> entry : newWork.getAliasToWork() + .entrySet()) { + String alias = entry.getKey(); + Operator op = entry.getValue(); + // get table scan op + if (!(op instanceof TableScanOperator)) { + throw new SemanticException("top op is not table scan"); + } + TableScanOperator tableScanOp = (TableScanOperator) op; + + // if the table scan is for big table; then skip it + // tracing down the operator tree from the table scan operator + Operator parentOp = tableScanOp; + Operator childOp = tableScanOp.getChildOperators().get(0); + while ((childOp != null) && (!childOp.equals(mapJoinOp))) { + parentOp = childOp; + assert parentOp.getChildOperators().size() == 1; + childOp = parentOp.getChildOperators().get(0); + } + if (childOp == null) { + throw new SemanticException( + "Cannot find join op by tracing down the table scan operator tree"); + } + // skip the big table pos + int i = childOp.getParentOperators().indexOf(parentOp); + if (i == bigTablePos) { + bigTableAlias = alias; + continue; + } + // set alias to work and put into smallTableAliasList + newLocalWork.getAliasToWork().put(alias, tableScanOp); + smallTableAliasList.add(alias); + // get input path and remove this alias from pathToAlias + // because this file will be fetched by fetch operator + LinkedHashMap> pathToAliases = newWork.getPathToAliases(); + + // keep record all the input path for this alias + HashSet pathSet = new HashSet(); + for (Map.Entry> entry2 : pathToAliases.entrySet()) { + String path = entry2.getKey(); + ArrayList list = entry2.getValue(); + if (list.contains(alias)) { + // add to path set + if (!pathSet.contains(path)) { + pathSet.add(path); + } + // remove this alias from the alias list + list.remove(alias); + } + } + if (pathSet.size() == 0) { + throw new SemanticException("No input path for alias " + alias); + } + + // create fetch work + FetchWork fetchWork = null; + List partDir = new 
ArrayList(); + List partDesc = new ArrayList(); + + for (String tablePath : pathSet) { + PartitionDesc partitionDesc = newWork.getPathToPartitionInfo().get(tablePath); + // create fetchwork for non-partitioned table + if (partitionDesc.getPartSpec() == null || partitionDesc.getPartSpec().size() == 0) { + fetchWork = new FetchWork(tablePath, partitionDesc.getTableDesc()); + break; + } + // if table is partitioned, add partDir and partitionDesc + partDir.add(tablePath); + partDesc.add(partitionDesc); + } + // create fetchwork for partitioned table + if (fetchWork == null) { + fetchWork = new FetchWork(partDir, partDesc); + } + // set alias to fetch work + newLocalWork.getAliasToFetchWork().put(alias, fetchWork); + } + // remove small table alias from aliasToWork; avoid concurrent modification + for (String alias : smallTableAliasList) { + newWork.getAliasToWork().remove(alias); + } + + // set up local work + newWork.setMapLocalWork(newLocalWork); + // remove reducer + newWork.setReducer(null); + // return the big table alias + if (bigTableAlias == null) { + throw new SemanticException("Big Table Alias is null"); + } + return bigTableAlias; + } + + public static String genMapJoinOpAndLocalWork(MapredWork newWork, JoinOperator op, int mapJoinPos) + throws SemanticException { + try { + LinkedHashMap, OpParseContext> opParseCtxMap = newWork + .getOpParseCtxMap(); + QBJoinTree newJoinTree = newWork.getJoinTree(); + // generate the map join operator; already checked the map join + MapJoinOperator newMapJoinOp = MapJoinProcessor.convertMapJoin(opParseCtxMap, op, + newJoinTree, mapJoinPos, true); + // generate the local work and return the big table alias + String bigTableAlias = MapJoinProcessor + .genMapJoinLocalWork(newWork, newMapJoinOp, mapJoinPos); + // clean up the mapred work + newWork.setOpParseCtxMap(null); + newWork.setJoinTree(null); + + return bigTableAlias; + + } catch (Exception e) { + e.printStackTrace(); + throw new SemanticException("Generate New MapJoin Operator Exception: " + e.getMessage()); + } + + } + /** * convert a regular join to a a map-side join. * @@ -108,29 +242,28 @@ * @param op * join operator * @param qbJoin * qb join tree * @param mapJoinPos - * position of the source to be read as part of map-reduce framework. - * All other sources are cached in memory + * position of the source to be read as part of map-reduce framework. 
All other sources + * are cached in memory */ - private MapJoinOperator convertMapJoin(ParseContext pctx, JoinOperator op, - QBJoinTree joinTree, int mapJoinPos) throws SemanticException { + public static MapJoinOperator convertMapJoin( + LinkedHashMap, OpParseContext> opParseCtxMap, + JoinOperator op, QBJoinTree joinTree, int mapJoinPos, boolean noCheckOuterJoin) + throws SemanticException { // outer join cannot be performed on a table which is being cached JoinDesc desc = op.getConf(); - org.apache.hadoop.hive.ql.plan.JoinCondDesc[] condns = desc.getConds(); - HiveConf hiveConf = pGraphContext.getConf(); - boolean noCheckOuterJoin = HiveConf.getBoolVar(hiveConf, - HiveConf.ConfVars.HIVEOPTSORTMERGEBUCKETMAPJOIN) - && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTBUCKETMAPJOIN); + JoinCondDesc[] condns = desc.getConds(); + Byte[] tagOrder = desc.getTagOrder(); + if (!noCheckOuterJoin) { checkMapJoin(mapJoinPos, condns); } - RowResolver oldOutputRS = pctx.getOpParseCtx().get(op).getRR(); + RowResolver oldOutputRS = opParseCtxMap.get(op).getRowResolver(); RowResolver outputRS = new RowResolver(); ArrayList outputColumnNames = new ArrayList(); Map> keyExprMap = new HashMap>(); Map> valueExprMap = new HashMap>(); - HashMap> filterMap = - new HashMap>(); + // Map> filterMap = new HashMap>(); // Walk over all the sources (which are guaranteed to be reduce sink // operators). @@ -141,13 +274,14 @@ List> newParentOps = new ArrayList>(); List> oldReduceSinkParentOps = new ArrayList>(); Map colExprMap = new HashMap(); + HashMap> columnTransfer = new HashMap>(); + // found a source which is not to be stored in memory if (leftSrc != null) { // assert mapJoinPos == 0; Operator parentOp = parentOps.get(0); assert parentOp.getParentOperators().size() == 1; - Operator grandParentOp = parentOp - .getParentOperators().get(0); + Operator grandParentOp = parentOp.getParentOperators().get(0); oldReduceSinkParentOps.add(parentOp); grandParentOp.removeChild(parentOp); newParentOps.add(grandParentOp); @@ -159,8 +293,7 @@ if (src != null) { Operator parentOp = parentOps.get(pos); assert parentOp.getParentOperators().size() == 1; - Operator grandParentOp = parentOp - .getParentOperators().get(0); + Operator grandParentOp = parentOp.getParentOperators().get(0); grandParentOp.removeChild(parentOp); oldReduceSinkParentOps.add(parentOp); @@ -171,21 +304,21 @@ // get the join keys from old parent ReduceSink operators for (pos = 0; pos < newParentOps.size(); pos++) { - ReduceSinkOperator oldPar = (ReduceSinkOperator) oldReduceSinkParentOps - .get(pos); + ReduceSinkOperator oldPar = (ReduceSinkOperator) oldReduceSinkParentOps.get(pos); ReduceSinkDesc rsconf = oldPar.getConf(); Byte tag = (byte) rsconf.getTag(); List keys = rsconf.getKeyCols(); keyExprMap.put(tag, keys); + + // set column transfer + HashMap map = (HashMap) oldPar.getColumnExprMap(); + columnTransfer.put(tag, map); } // create the map-join operator for (pos = 0; pos < newParentOps.size(); pos++) { - RowResolver inputRS = pGraphContext.getOpParseCtx().get( - newParentOps.get(pos)).getRR(); - + RowResolver inputRS = opParseCtxMap.get(newParentOps.get(pos)).getRowResolver(); List values = new ArrayList(); - List filterDesc = new ArrayList(); Iterator keysIter = inputRS.getTableNames().iterator(); while (keysIter.hasNext()) { @@ -202,35 +335,49 @@ String outputCol = oldValueInfo.getInternalName(); if (outputRS.get(key, field) == null) { outputColumnNames.add(outputCol); - ExprNodeDesc colDesc = new ExprNodeColumnDesc(valueInfo.getType(), - 
valueInfo.getInternalName(), valueInfo.getTabAlias(), valueInfo - .getIsVirtualCol()); + ExprNodeDesc colDesc = new ExprNodeColumnDesc(valueInfo.getType(), valueInfo + .getInternalName(), valueInfo.getTabAlias(), valueInfo.getIsVirtualCol()); values.add(colDesc); - outputRS.put(key, field, new ColumnInfo(outputCol, valueInfo - .getType(), valueInfo.getTabAlias(), valueInfo - .getIsVirtualCol(),valueInfo.isHiddenVirtualCol())); + outputRS.put(key, field, new ColumnInfo(outputCol, valueInfo.getType(), valueInfo + .getTabAlias(), valueInfo.getIsVirtualCol(), valueInfo.isHiddenVirtualCol())); colExprMap.put(outputCol, colDesc); } } } - TypeCheckCtx tcCtx = new TypeCheckCtx(inputRS); - for (ASTNode cond : joinTree.getFilters().get((byte)pos)) { + valueExprMap.put(new Byte((byte) pos), values); + // filterMap.put(new Byte((byte) pos), filterDesc); + } - ExprNodeDesc filter = - (ExprNodeDesc)TypeCheckProcFactory.genExprNode(cond, tcCtx).get(cond); - if (filter == null) { - throw new SemanticException(tcCtx.getError()); + Map> filterMap = desc.getFilters(); + for (Map.Entry> entry : filterMap.entrySet()) { + Byte srcAlias = entry.getKey(); + List columnDescList = entry.getValue(); + + for (ExprNodeDesc nodeExpr : columnDescList) { + ExprNodeGenericFuncDesc funcDesc = (ExprNodeGenericFuncDesc) nodeExpr; + for (ExprNodeDesc childDesc : funcDesc.getChildExprs()) { + if (!(childDesc instanceof ExprNodeColumnDesc)) { + continue; + } + ExprNodeColumnDesc columnDesc = (ExprNodeColumnDesc) childDesc; + // reset columns + String column = columnDesc.getColumn(); + String newColumn = null; + HashMap map = columnTransfer.get(srcAlias); + ExprNodeColumnDesc tmpDesc = (ExprNodeColumnDesc) map.get(column); + if (tmpDesc != null) { + newColumn = tmpDesc.getColumn(); + } + if (newColumn == null) { + throw new SemanticException("No Column name found in parent reduce sink op"); + } + columnDesc.setColumn(newColumn); } - filterDesc.add(filter); } - - valueExprMap.put(new Byte((byte) pos), values); - filterMap.put(new Byte((byte) pos), filterDesc); } - org.apache.hadoop.hive.ql.plan.JoinCondDesc[] joinCondns = op.getConf() - .getConds(); + JoinCondDesc[] joinCondns = op.getConf().getConds(); Operator[] newPar = new Operator[newParentOps.size()]; pos = 0; @@ -248,9 +395,25 @@ .getFieldSchemasFromColumnList(keyCols, "mapjoinkey")); List valueTableDescs = new ArrayList(); + List valueFiltedTableDescs = new ArrayList(); for (pos = 0; pos < newParentOps.size(); pos++) { List valueCols = valueExprMap.get(new Byte((byte) pos)); + int length = valueCols.size(); + List valueFilteredCols = new ArrayList(length); + // deep copy expr node desc + for (int i = 0; i < length; i++) { + valueFilteredCols.add(valueCols.get(i).clone()); + } + List valueFilters = filterMap.get(new Byte((byte) pos)); + + if (valueFilters != null && valueFilters.size() != 0 && pos != mapJoinPos) { + ExprNodeColumnDesc isFilterDesc = new ExprNodeColumnDesc(TypeInfoFactory + .getPrimitiveTypeInfo(Constants.BOOLEAN_TYPE_NAME), "filter", "filter", false); + valueFilteredCols.add(isFilterDesc); + } + + keyOrder = new StringBuilder(); for (int i = 0; i < valueCols.size(); i++) { keyOrder.append("+"); @@ -258,16 +421,23 @@ TableDesc valueTableDesc = PlanUtils.getMapJoinValueTableDesc(PlanUtils .getFieldSchemasFromColumnList(valueCols, "mapjoinvalue")); + TableDesc valueFilteredTableDesc = PlanUtils.getMapJoinValueTableDesc(PlanUtils + .getFieldSchemasFromColumnList(valueFilteredCols, "mapjoinvalue")); valueTableDescs.add(valueTableDesc); + 
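+          // Note on the extra BOOLEAN "filter" column assembled above: when a
+          // small table takes part in a filtered outer join, each of its rows
+          // cached in the hash table carries one more boolean field recording
+          // whether the row passed the join filters, so matching rows can still
+          // be told apart from filtered-out ones after the conversion. The big
+          // table (pos == mapJoinPos) is streamed, not cached, and needs no tag.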
valueFiltedTableDescs.add(valueFilteredTableDesc); } + MapJoinDesc mapJoinDescriptor = new MapJoinDesc(keyExprMap, keyTableDesc, valueExprMap, + valueTableDescs, valueFiltedTableDescs, outputColumnNames, mapJoinPos, joinCondns, + filterMap, op.getConf().getNoOuterJoin()); + mapJoinDescriptor.setTagOrder(tagOrder); - MapJoinOperator mapJoinOp = (MapJoinOperator) putOpInsertMap( - OperatorFactory.getAndMakeChild(new MapJoinDesc(keyExprMap, - keyTableDesc, valueExprMap, valueTableDescs, outputColumnNames, - mapJoinPos, joinCondns, filterMap, op.getConf().getNoOuterJoin()), - new RowSchema(outputRS.getColumnInfos()), newPar), outputRS); + MapJoinOperator mapJoinOp = (MapJoinOperator) OperatorFactory.getAndMakeChild( + mapJoinDescriptor, new RowSchema(outputRS.getColumnInfos()), newPar); + OpParseContext ctx = new OpParseContext(outputRS); + opParseCtxMap.put(mapJoinOp, ctx); + mapJoinOp.getConf().setReversedExprs(op.getConf().getReversedExprs()); mapJoinOp.setColumnExprMap(colExprMap); @@ -283,37 +453,59 @@ op.setChildOperators(null); op.setParentOperators(null); + return mapJoinOp; + } + + public MapJoinOperator generateMapJoniOperator(ParseContext pctx, JoinOperator op, + QBJoinTree joinTree, int mapJoinPos) throws SemanticException { + HiveConf hiveConf = pctx.getConf(); + boolean noCheckOuterJoin = HiveConf.getBoolVar(hiveConf, + HiveConf.ConfVars.HIVEOPTSORTMERGEBUCKETMAPJOIN) + && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTBUCKETMAPJOIN); + + + LinkedHashMap, OpParseContext> opParseCtxMap = pctx + .getOpParseCtx(); + MapJoinOperator mapJoinOp = convertMapJoin(opParseCtxMap, op, joinTree, mapJoinPos, + noCheckOuterJoin); // create a dummy select to select all columns genSelectPlan(pctx, mapJoinOp); return mapJoinOp; } - public static void checkMapJoin(int mapJoinPos, - org.apache.hadoop.hive.ql.plan.JoinCondDesc[] condns) - throws SemanticException { - for (org.apache.hadoop.hive.ql.plan.JoinCondDesc condn : condns) { - if (condn.getType() == JoinDesc.FULL_OUTER_JOIN) { - throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg()); + public static HashSet getSmallTableOnlySet(JoinCondDesc[] condns) { + HashSet smallTableOnlySet = new HashSet(); + + for (JoinCondDesc condn : condns) { + int joinType = condn.getType(); + if (joinType == JoinDesc.FULL_OUTER_JOIN) { + return null; + } else if (joinType == JoinDesc.LEFT_OUTER_JOIN || joinType == JoinDesc.LEFT_SEMI_JOIN) { + smallTableOnlySet.add(condn.getRight()); + } else if (joinType == JoinDesc.RIGHT_OUTER_JOIN) { + smallTableOnlySet.add(condn.getLeft()); } - if ((condn.getType() == JoinDesc.LEFT_OUTER_JOIN) - && (condn.getLeft() != mapJoinPos)) { - throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg()); - } - if ((condn.getType() == JoinDesc.RIGHT_OUTER_JOIN) - && (condn.getRight() != mapJoinPos)) { - throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg()); - } } + + return smallTableOnlySet; } - private void genSelectPlan(ParseContext pctx, MapJoinOperator input) - throws SemanticException { + public static void checkMapJoin(int mapJoinPos, JoinCondDesc[] condns) throws SemanticException { + HashSet smallTableOnlySet = MapJoinProcessor.getSmallTableOnlySet(condns); + + if (smallTableOnlySet == null || smallTableOnlySet.contains(mapJoinPos)) { + throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg()); + } + return; + } + + private void genSelectPlan(ParseContext pctx, MapJoinOperator input) throws SemanticException { List> childOps = input.getChildOperators(); input.setChildOperators(null); // 
create a dummy select - This select is needed by the walker to split the // mapJoin later on - RowResolver inputRR = pctx.getOpParseCtx().get(input).getRR(); + RowResolver inputRR = pctx.getOpParseCtx().get(input).getRowResolver(); ArrayList exprs = new ArrayList(); ArrayList outputs = new ArrayList(); @@ -326,20 +518,19 @@ String internalName = outputCols.get(i); String[] nm = inputRR.reverseLookup(internalName); ColumnInfo valueInfo = inputRR.get(nm[0], nm[1]); - ExprNodeDesc colDesc = new ExprNodeColumnDesc(valueInfo.getType(), - valueInfo.getInternalName(), nm[0], valueInfo.getIsVirtualCol()); + ExprNodeDesc colDesc = new ExprNodeColumnDesc(valueInfo.getType(), valueInfo + .getInternalName(), nm[0], valueInfo.getIsVirtualCol()); exprs.add(colDesc); outputs.add(internalName); - outputRS.put(nm[0], nm[1], new ColumnInfo(internalName, valueInfo - .getType(), nm[0], valueInfo.getIsVirtualCol(), valueInfo.isHiddenVirtualCol())); + outputRS.put(nm[0], nm[1], new ColumnInfo(internalName, valueInfo.getType(), nm[0], valueInfo + .getIsVirtualCol(), valueInfo.isHiddenVirtualCol())); colExprMap.put(internalName, colDesc); } SelectDesc select = new SelectDesc(exprs, outputs, false); - SelectOperator sel = (SelectOperator) putOpInsertMap( - OperatorFactory.getAndMakeChild(select, new RowSchema(inputRR - .getColumnInfos()), input), inputRR); + SelectOperator sel = (SelectOperator) putOpInsertMap(OperatorFactory.getAndMakeChild(select, + new RowSchema(inputRR.getColumnInfos()), input), inputRR); sel.setColumnExprMap(colExprMap); @@ -357,11 +548,10 @@ * join operator * @param qbJoin * qb join tree - * @return -1 if it cannot be converted to a map-side join, position of the - * map join node otherwise + * @return -1 if it cannot be converted to a map-side join, position of the map join node + * otherwise */ - private int mapSideJoin(JoinOperator op, QBJoinTree joinTree) - throws SemanticException { + private int mapSideJoin(JoinOperator op, QBJoinTree joinTree) throws SemanticException { int mapJoinPos = -1; if (joinTree.isMapSideJoin()) { int pos = 0; @@ -387,8 +577,8 @@ // support this by randomly // leaving some table from the list of tables to be cached if (mapJoinPos == -1) { - throw new SemanticException(ErrorMsg.INVALID_MAPJOIN_HINT - .getMsg(pGraphContext.getQB().getParseInfo().getHints())); + throw new SemanticException(ErrorMsg.INVALID_MAPJOIN_HINT.getMsg(pGraphContext.getQB() + .getParseInfo().getHints())); } } @@ -396,8 +586,8 @@ } /** - * Transform the query tree. For each join, check if it is a map-side join - * (user specified). If yes, convert it to a map-side join. + * Transform the query tree. For each join, check if it is a map-side join (user specified). If + * yes, convert it to a map-side join. 
* * @param pactx * current parse context @@ -410,22 +600,20 @@ if (pGraphContext.getJoinContext() != null) { Map joinMap = new HashMap(); Map mapJoinMap = pGraphContext.getMapJoinContext(); - if(mapJoinMap == null) { - mapJoinMap = new HashMap (); + if (mapJoinMap == null) { + mapJoinMap = new HashMap(); pGraphContext.setMapJoinContext(mapJoinMap); } - Set> joinCtx = pGraphContext - .getJoinContext().entrySet(); - Iterator> joinCtxIter = joinCtx - .iterator(); + Set> joinCtx = pGraphContext.getJoinContext().entrySet(); + Iterator> joinCtxIter = joinCtx.iterator(); while (joinCtxIter.hasNext()) { Map.Entry joinEntry = joinCtxIter.next(); JoinOperator joinOp = joinEntry.getKey(); QBJoinTree qbJoin = joinEntry.getValue(); int mapJoinPos = mapSideJoin(joinOp, qbJoin); if (mapJoinPos >= 0) { - MapJoinOperator mapJoinOp = convertMapJoin(pactx, joinOp, qbJoin, mapJoinPos); + MapJoinOperator mapJoinOp = generateMapJoniOperator(pactx, joinOp, qbJoin, mapJoinPos); listMapJoinOps.add(mapJoinOp); mapJoinMap.put(mapJoinOp, qbJoin); } else { @@ -444,19 +632,15 @@ // the operator stack. // The dispatcher generates the plan from the operator tree Map opRules = new LinkedHashMap(); - opRules.put(new RuleRegExp(new String("R0"), "MAPJOIN%"), - getCurrentMapJoin()); - opRules.put(new RuleRegExp(new String("R1"), "MAPJOIN%.*FS%"), - getMapJoinFS()); - opRules.put(new RuleRegExp(new String("R2"), "MAPJOIN%.*RS%"), - getMapJoinDefault()); - opRules.put(new RuleRegExp(new String("R4"), "MAPJOIN%.*UNION%"), - getMapJoinDefault()); + opRules.put(new RuleRegExp(new String("R0"), "MAPJOIN%"), getCurrentMapJoin()); + opRules.put(new RuleRegExp(new String("R1"), "MAPJOIN%.*FS%"), getMapJoinFS()); + opRules.put(new RuleRegExp(new String("R2"), "MAPJOIN%.*RS%"), getMapJoinDefault()); + opRules.put(new RuleRegExp(new String("R4"), "MAPJOIN%.*UNION%"), getMapJoinDefault()); // The dispatcher fires the processor corresponding to the closest matching // rule and passes the context along - Dispatcher disp = new DefaultRuleDispatcher(getDefault(), opRules, - new MapJoinWalkerCtx(listMapJoinOpsNoRed, pGraphContext)); + Dispatcher disp = new DefaultRuleDispatcher(getDefault(), opRules, new MapJoinWalkerCtx( + listMapJoinOpsNoRed, pGraphContext)); GraphWalker ogw = new GenMapRedWalker(disp); ArrayList topNodes = new ArrayList(); @@ -483,7 +667,7 @@ MapJoinWalkerCtx ctx = (MapJoinWalkerCtx) procCtx; MapJoinOperator mapJoin = (MapJoinOperator) nd; if (ctx.getListRejectedMapJoins() != null && !ctx.getListRejectedMapJoins().contains(mapJoin)) { - //for rule: MapJoin%.*MapJoin + // for rule: MapJoin%.*MapJoin // have a child mapjoin. if the the current mapjoin is on a local work, // will put the current mapjoin in the rejected list. 
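+      // findGrandChildSubqueryMapjoin below returns a three-valued Boolean:
+      //   null  -> the walk was inconclusive (a fork in the operator tree or a
+      //            blocking operator); keep this map join as the current one
+      //   true  -> this map join feeds the child map join's big-table branch,
+      //            so it can go on the no-reducer list
+      //   false -> it feeds the child's local (cached) branch and is rejected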
Boolean bigBranch = findGrandChildSubqueryMapjoin(ctx, mapJoin); @@ -491,7 +675,7 @@ ctx.setCurrMapJoinOp(mapJoin); return null; } - if(bigBranch) { + if (bigBranch) { addNoReducerMapJoinToCtx(ctx, mapJoin); } else { addRejectMapJoinToCtx(ctx, mapJoin); @@ -505,28 +689,24 @@ private Boolean findGrandChildSubqueryMapjoin(MapJoinWalkerCtx ctx, MapJoinOperator mapJoin) { Operator parent = mapJoin; while (true) { - if(parent.getChildOperators() == null || parent.getChildOperators().size() != 1) { + if (parent.getChildOperators() == null || parent.getChildOperators().size() != 1) { return null; } Operator ch = parent.getChildOperators().get(0); - if(ch instanceof MapJoinOperator) { - if (!nonSubqueryMapJoin(ctx.getpGraphContext(), (MapJoinOperator) ch, - mapJoin)) { - if (ch.getParentOperators().indexOf(parent) == ((MapJoinOperator) ch) - .getConf().getPosBigTable()) { - //not come from the local branch + if (ch instanceof MapJoinOperator) { + if (!nonSubqueryMapJoin(ctx.getpGraphContext(), (MapJoinOperator) ch, mapJoin)) { + if (ch.getParentOperators().indexOf(parent) == ((MapJoinOperator) ch).getConf() + .getPosBigTable()) { + // not come from the local branch return true; } } return false; // not from a sub-query. } - if ((ch instanceof JoinOperator) - || (ch instanceof UnionOperator) - || (ch instanceof ReduceSinkOperator) - || (ch instanceof LateralViewJoinOperator) - || (ch instanceof GroupByOperator) - || (ch instanceof ScriptOperator)) { + if ((ch instanceof JoinOperator) || (ch instanceof UnionOperator) + || (ch instanceof ReduceSinkOperator) || (ch instanceof LateralViewJoinOperator) + || (ch instanceof GroupByOperator) || (ch instanceof ScriptOperator)) { return null; } @@ -534,11 +714,11 @@ } } - private boolean nonSubqueryMapJoin(ParseContext pGraphContext, - MapJoinOperator mapJoin, MapJoinOperator parentMapJoin) { + private boolean nonSubqueryMapJoin(ParseContext pGraphContext, MapJoinOperator mapJoin, + MapJoinOperator parentMapJoin) { QBJoinTree joinTree = pGraphContext.getMapJoinContext().get(mapJoin); QBJoinTree parentJoinTree = pGraphContext.getMapJoinContext().get(parentMapJoin); - if(joinTree.getJoinSrc() != null && joinTree.getJoinSrc().equals(parentJoinTree)) { + if (joinTree.getJoinSrc() != null && joinTree.getJoinSrc().equals(parentJoinTree)) { return true; } return false; @@ -547,11 +727,11 @@ private static void addNoReducerMapJoinToCtx(MapJoinWalkerCtx ctx, AbstractMapJoinOperator mapJoin) { - if (ctx.getListRejectedMapJoins() != null - && ctx.getListRejectedMapJoins().contains(mapJoin)) { + if (ctx.getListRejectedMapJoins() != null && ctx.getListRejectedMapJoins().contains(mapJoin)) { return; } - List> listMapJoinsNoRed = ctx.getListMapJoinsNoRed(); + List> listMapJoinsNoRed = ctx + .getListMapJoinsNoRed(); if (listMapJoinsNoRed == null) { listMapJoinsNoRed = new ArrayList>(); } @@ -565,10 +745,11 @@ AbstractMapJoinOperator mapjoin) { // current map join is null means it has been handled by CurrentMapJoin // process. 
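+    // Bookkeeping invariant: the no-reducer list and the rejected list stay
+    // disjoint. addNoReducerMapJoinToCtx above refuses a map join that was
+    // already rejected, and a map join rejected here is also removed from the
+    // no-reducer list below, so rejection always takes precedence.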
- if(mapjoin == null) { + if (mapjoin == null) { return; } - List> listRejectedMapJoins = ctx.getListRejectedMapJoins(); + List> listRejectedMapJoins = ctx + .getListRejectedMapJoins(); if (listRejectedMapJoins == null) { listRejectedMapJoins = new ArrayList>(); } @@ -576,15 +757,15 @@ listRejectedMapJoins.add(mapjoin); } - if (ctx.getListMapJoinsNoRed() != null - && ctx.getListMapJoinsNoRed().contains(mapjoin)) { + if (ctx.getListMapJoinsNoRed() != null && ctx.getListMapJoinsNoRed().contains(mapjoin)) { ctx.getListMapJoinsNoRed().remove(mapjoin); } ctx.setListRejectedMapJoins(listRejectedMapJoins); } - private static int findGrandparentBranch(Operator currOp, Operator grandParent) { + private static int findGrandparentBranch(Operator currOp, + Operator grandParent) { int pos = -1; for (int i = 0; i < currOp.getParentOperators().size(); i++) { List> parentOpList = new LinkedList>(); @@ -592,14 +773,14 @@ boolean found = false; while (!parentOpList.isEmpty()) { Operator p = parentOpList.remove(0); - if(p == grandParent) { + if (p == grandParent) { found = true; break; - } else if (p.getParentOperators() != null){ + } else if (p.getParentOperators() != null) { parentOpList.addAll(p.getParentOperators()); } } - if(found) { + if (found) { pos = i; break; } @@ -626,8 +807,7 @@ .getListRejectedMapJoins(); // the mapjoin has already been handled - if ((listRejectedMapJoins != null) - && (listRejectedMapJoins.contains(mapJoin))) { + if ((listRejectedMapJoins != null) && (listRejectedMapJoins.contains(mapJoin))) { return null; } addNoReducerMapJoinToCtx(ctx, mapJoin); @@ -701,7 +881,8 @@ * @param listMapJoinsNoRed * @param pGraphContext2 */ - public MapJoinWalkerCtx(List> listMapJoinsNoRed, ParseContext pGraphContext) { + public MapJoinWalkerCtx(List> listMapJoinsNoRed, + ParseContext pGraphContext) { this.listMapJoinsNoRed = listMapJoinsNoRed; currMapJoinOp = null; listRejectedMapJoins = new ArrayList>(); @@ -719,7 +900,8 @@ * @param listMapJoinsNoRed * the listMapJoins to set */ - public void setListMapJoins(List> listMapJoinsNoRed) { + public void setListMapJoins( + List> listMapJoinsNoRed) { this.listMapJoinsNoRed = listMapJoinsNoRed; } Index: ql/src/java/org/apache/hadoop/hive/ql/ppd/OpWalkerInfo.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/ppd/OpWalkerInfo.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/ppd/OpWalkerInfo.java (working copy) @@ -56,7 +56,7 @@ } public RowResolver getRowResolver(Node op) { - return opToParseCtxMap.get(op).getRR(); + return opToParseCtxMap.get(op).getRowResolver(); } public OpParseContext put(Operator key, Index: ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecute.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecute.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecute.java (working copy) @@ -18,33 +18,13 @@ package org.apache.hadoop.hive.ql.hooks; -import java.util.Set; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.security.UserGroupInformation; - /** * The post execute hook interface. A list of such hooks can be configured to be * called after compilation and before execution. */ public interface PostExecute { - /** - * The run command that is called just before the execution of the query. - * - * @param sess - * The session state. - * @param inputs - * The set of input tables and partitions. 
- * @param outputs - * The set of output tables, partitions, local and hdfs directories. - * @param lInfo - * The column level lineage information. - * @param ugi - * The user group security information. - */ - void run(SessionState sess, Set inputs, - Set outputs, LineageInfo lInfo, - UserGroupInformation ugi) throws Exception; + public void run(HookContext hookContext) throws Exception; } Index: ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecute.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecute.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecute.java (working copy) @@ -18,31 +18,14 @@ package org.apache.hadoop.hive.ql.hooks; -import java.util.Set; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.security.UserGroupInformation; - /** * The pre execute hook interface. A list of such hooks can be configured to be * called after compilation and before execution. */ public interface PreExecute { - /** - * The run command that is called just before the execution of the query. - * - * @param sess - * The session state. - * @param inputs - * The set of input tables and partitions. - * @param outputs - * The set of output tables, partitions, local and hdfs directories. - * @param ugi - * The user group security information. - */ - public void run(SessionState sess, Set inputs, - Set outputs, UserGroupInformation ugi) - throws Exception; + public void run(HookContext hookContext) throws Exception; + } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java (working copy) @@ -42,19 +42,23 @@ import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; import org.apache.hadoop.hive.serde2.SerDe; +import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; +import org.apache.hadoop.hive.serde2.objectinspector.StandardStructObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; import org.apache.hadoop.util.ReflectionUtils; + public class HashTableSinkOperator extends TerminalOperator - implements Serializable { +implements Serializable { private static final long serialVersionUID = 1L; private static final Log LOG = LogFactory.getLog(HashTableSinkOperator.class .getName()); - // from abstract map join operator + //from abstract map join operator /** * The expressions for join inputs's join keys. 
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java (working copy) @@ -42,19 +42,23 @@ import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; import org.apache.hadoop.hive.serde2.SerDe; +import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; +import org.apache.hadoop.hive.serde2.objectinspector.StandardStructObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; import org.apache.hadoop.util.ReflectionUtils; + public class HashTableSinkOperator extends TerminalOperator - implements Serializable { +implements Serializable { private static final long serialVersionUID = 1L; private static final Log LOG = LogFactory.getLog(HashTableSinkOperator.class .getName()); - // from abstract map join operator + //from abstract map join operator /** * The expressions for join inputs' join keys. */ @@ -68,10 +72,8 @@ */ protected transient Map> joinKeysStandardObjectInspectors; - protected transient int posBigTableTag = -1; // one of the tables that is not - // in memory - protected transient int posBigTableAlias = -1; // one of the tables that is - // not in memory + protected transient int posBigTableTag = -1; // one of the tables that is not in memory + protected transient int posBigTableAlias = -1; // one of the tables that is not in memory transient int mapJoinRowsKey; // rows for a given key protected transient RowContainer> emptyList = null; @@ -102,7 +104,8 @@ */ protected transient Map> joinValuesStandardObjectInspectors; - protected transient Map> rowContainerStandardObjectInspectors; + protected transient + Map> rowContainerStandardObjectInspectors; protected transient Byte[] order; // order in which the results should Configuration hconf; @@ -112,9 +115,12 @@ protected transient Map> mapJoinTables; protected transient boolean noOuterJoin; - private long rowNumber = 0; + private long rowNumber=0; protected transient LogHelper console; + private long hashTableScale; + private boolean isAbort =false; + public static class HashTableSinkObjectCtx { ObjectInspector standardOI; SerDe serde; @@ -157,85 +163,83 @@ } + private static final transient String[] FATAL_ERR_MSG = { + null, // counter value 0 means no error + "Map-side join size exceeds hive.mapjoin.maxsize. " + + "Please increase that or remove the mapjoin hint." + }; private final int metadataKeyTag = -1; transient int[] metadataValueTag; transient int maxMapJoinSize; - public HashTableSinkOperator() { - // super(); - console = new LogHelper(LOG, true); + + public HashTableSinkOperator(){ } - public HashTableSinkOperator(MapJoinOperator mjop) { + public HashTableSinkOperator(MapJoinOperator mjop){ this.conf = new HashTableSinkDesc(mjop.getConf()); - console = new LogHelper(LOG); } + @Override protected void initializeOp(Configuration hconf) throws HiveException { - + boolean isSilent = HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVESESSIONSILENT); + console = new LogHelper(LOG,isSilent); maxMapJoinSize = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEMAXMAPJOINSIZE); numMapRowsRead = 0; firstRow = true; - // for small tables only; so get the big table position first + //for small tables only; so get the big table position first posBigTableTag = conf.getPosBigTable(); order = conf.getTagOrder(); - posBigTableAlias = order[posBigTableTag]; + posBigTableAlias=order[posBigTableTag]; - // initialize some variables, which used to be initialized in - // CommonJoinOperator + //initialize some variables, which used to be initialized in CommonJoinOperator numAliases = conf.getExprs().size(); this.hconf = hconf; totalSz = 0; noOuterJoin = conf.isNoOuterJoin(); - // process join keys + //process join keys joinKeys = new HashMap>(); - JoinUtil.populateJoinKeyValue(joinKeys, conf.getKeys(), order, - posBigTableAlias); - joinKeysObjectInspectors = JoinUtil.getObjectInspectorsFromEvaluators( - joinKeys, inputObjInspectors, posBigTableAlias); - joinKeysStandardObjectInspectors = JoinUtil.getStandardObjectInspectors( - joinKeysObjectInspectors, posBigTableAlias); + JoinUtil.populateJoinKeyValue(joinKeys, conf.getKeys(),order,posBigTableAlias); + joinKeysObjectInspectors = JoinUtil.getObjectInspectorsFromEvaluators(joinKeys,inputObjInspectors,posBigTableAlias); + joinKeysStandardObjectInspectors = JoinUtil.getStandardObjectInspectors(joinKeysObjectInspectors,posBigTableAlias); - // process join values 
joinValues = new HashMap>(); - JoinUtil.populateJoinKeyValue(joinValues, conf.getExprs(), order, - posBigTableAlias); - joinValuesObjectInspectors = JoinUtil.getObjectInspectorsFromEvaluators( - joinValues, inputObjInspectors, posBigTableAlias); - joinValuesStandardObjectInspectors = JoinUtil.getStandardObjectInspectors( - joinValuesObjectInspectors, posBigTableAlias); + JoinUtil.populateJoinKeyValue(joinValues, conf.getExprs(),order,posBigTableAlias); + joinValuesObjectInspectors = JoinUtil.getObjectInspectorsFromEvaluators(joinValues,inputObjInspectors,posBigTableAlias); + joinValuesStandardObjectInspectors = JoinUtil.getStandardObjectInspectors(joinValuesObjectInspectors,posBigTableAlias); - // process join filters + //process join filters joinFilters = new HashMap>(); - JoinUtil.populateJoinKeyValue(joinFilters, conf.getFilters(), order, - posBigTableAlias); - joinFilterObjectInspectors = JoinUtil.getObjectInspectorsFromEvaluators( - joinFilters, inputObjInspectors, posBigTableAlias); + JoinUtil.populateJoinKeyValue(joinFilters, conf.getFilters(),order,posBigTableAlias); + joinFilterObjectInspectors = JoinUtil.getObjectInspectorsFromEvaluators(joinFilters,inputObjInspectors,posBigTableAlias); if (noOuterJoin) { rowContainerStandardObjectInspectors = joinValuesStandardObjectInspectors; } else { - Map> rowContainerObjectInspectors = new HashMap>(); + Map> rowContainerObjectInspectors = + new HashMap>(); for (Byte alias : order) { - if (alias == posBigTableAlias) { + if(alias == posBigTableAlias){ continue; } ArrayList rcOIs = new ArrayList(); rcOIs.addAll(joinValuesObjectInspectors.get(alias)); // for each alias, add object inspector for boolean as the last element - rcOIs - .add(PrimitiveObjectInspectorFactory.writableBooleanObjectInspector); + rcOIs.add( + PrimitiveObjectInspectorFactory.writableBooleanObjectInspector); rowContainerObjectInspectors.put(alias, rcOIs); } - rowContainerStandardObjectInspectors = getStandardObjectInspectors(rowContainerObjectInspectors); + rowContainerStandardObjectInspectors = + getStandardObjectInspectors(rowContainerObjectInspectors); } metadataValueTag = new int[numAliases]; @@ -245,17 +249,33 @@ mapJoinTables = new HashMap>(); + int hashTableThreshold = HiveConf.getIntVar(hconf, + HiveConf.ConfVars.HIVEHASHTABLETHRESHOLD); + float hashTableLoadFactor = HiveConf.getFloatVar(hconf, + HiveConf.ConfVars.HIVEHASHTABLELOADFACTOR); + float hashTableMaxMemoryUsage = HiveConf.getFloatVar(hconf, + HiveConf.ConfVars.HIVEHASHTABLEMAXMEMORYUSAGE); + hashTableScale =HiveConf.getLongVar(hconf, + HiveConf.ConfVars.HIVEHASHTABLESCALE); + if(hashTableScale <= 0) { + hashTableScale =1; + } + // initialize the hash tables for other tables - for (Byte pos : order) { + for (Byte pos:order) { if (pos == posBigTableTag) { continue; } - HashMapWrapper hashTable = new HashMapWrapper(); + HashMapWrapper hashTable = + new HashMapWrapper(hashTableThreshold,hashTableLoadFactor,hashTableMaxMemoryUsage); + mapJoinTables.put(pos, hashTable); } } + + protected static HashMap> getStandardObjectInspectors( Map> aliasToObjectInspectors) { HashMap> result = new HashMap>(); @@ -272,50 +292,53 @@ result.put(alias, fieldOIList); } return result; + } - public void generateMapMetaData() throws Exception { + private void setKeyMetaData() throws SerDeException{ TableDesc keyTableDesc = conf.getKeyTblDesc(); - SerDe keySerializer = (SerDe) ReflectionUtils.newInstance(keyTableDesc - .getDeserializerClass(), null); + SerDe keySerializer = (SerDe) ReflectionUtils.newInstance( + 
keyTableDesc.getDeserializerClass(), null); keySerializer.initialize(null, keyTableDesc.getProperties()); MapJoinMetaData.clear(); MapJoinMetaData.put(Integer.valueOf(metadataKeyTag), - new HashTableSinkObjectCtx(ObjectInspectorUtils - .getStandardObjectInspector(keySerializer.getObjectInspector(), - ObjectInspectorCopyOption.WRITABLE), keySerializer, - keyTableDesc, hconf)); + new HashTableSinkObjectCtx( + ObjectInspectorUtils + .getStandardObjectInspector(keySerializer + .getObjectInspector(), + ObjectInspectorCopyOption.WRITABLE), keySerializer, + keyTableDesc, hconf)); } /* - * This operator only process small tables Read the key/value pairs Load them - * into hashtable + * This operator only processes small tables + * Read the key/value pairs + * Load them into the hashtable */ @Override - public void processOp(Object row, int tag) throws HiveException { - // let the mapJoinOp process these small tables - try { - if (firstRow) { - // generate the map metadata - generateMapMetaData(); + public void processOp(Object row, int tag) throws HiveException{ + //let the mapJoinOp process these small tables + try{ + if(firstRow){ + //generate the map metadata + setKeyMetaData(); firstRow = false; } alias = order[tag]; - // alias = (byte)tag; + //alias = (byte)tag; // compute keys and values as StandardObjects - AbstractMapJoinKey keyMap = JoinUtil.computeMapJoinKeys(row, joinKeys - .get(alias), joinKeysObjectInspectors.get(alias)); + AbstractMapJoinKey keyMap= JoinUtil.computeMapJoinKeys(row, joinKeys.get(alias), + joinKeysObjectInspectors.get(alias)); - Object[] value = JoinUtil.computeMapJoinValues(row, - joinValues.get(alias), joinValuesObjectInspectors.get(alias), - joinFilters.get(alias), joinFilterObjectInspectors.get(alias), - noOuterJoin); + Object[] value = JoinUtil.computeMapJoinValues(row, joinValues.get(alias), + joinValuesObjectInspectors.get(alias),joinFilters.get(alias), + joinFilterObjectInspectors.get(alias), noOuterJoin); - HashMapWrapper hashTable = mapJoinTables - .get((byte) tag); + HashMapWrapper hashTable = mapJoinTables.get((byte) tag); + MapJoinObjectValue o = hashTable.get(keyMap); MapJoinRowContainer res = null; @@ -326,92 +349,103 @@ if (metadataValueTag[tag] == -1) { metadataValueTag[tag] = order[tag]; - - TableDesc valueTableDesc = conf.getValueTblDescs().get(tag); - SerDe valueSerDe = (SerDe) ReflectionUtils.newInstance(valueTableDesc - .getDeserializerClass(), null); - valueSerDe.initialize(null, valueTableDesc.getProperties()); - - MapJoinMetaData.put(Integer.valueOf(metadataValueTag[tag]), - new HashTableSinkObjectCtx(ObjectInspectorUtils - .getStandardObjectInspector(valueSerDe.getObjectInspector(), - ObjectInspectorCopyOption.WRITABLE), valueSerDe, - valueTableDesc, hconf)); + setValueMetaData(tag); } // Construct externalizable objects for key and value if (needNewKey) { MapJoinObjectValue valueObj = new MapJoinObjectValue( metadataValueTag[tag], res); + rowNumber++; + if(rowNumber >hashTableScale && rowNumber%hashTableScale ==0 ){ + isAbort = hashTable.isAbort(rowNumber,console); + if(isAbort){ + throw new HiveException("RunOutOfMemoryUsage"); + } + } hashTable.put(keyMap, valueObj); } } else { res = o.getObj(); res.add(value); - } + } - } catch (Exception e) { - e.printStackTrace(); + + }catch (SerDeException e) { throw new HiveException(e); } }
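The rowNumber/hashTableScale guard above probes heap usage only once every hashTableScale rows, keeping the relatively expensive check off the per-row path. A self-contained sketch of the same pattern, with the class and constant names invented for illustration (the defaults mirror the hive.hashtable.* settings this patch reads):

    import java.lang.management.ManagementFactory;
    import java.lang.management.MemoryMXBean;

    class MemoryGate {
      private static final long SCALE = 100000L;     // cf. hive.hashtable.scale
      private static final double MAX_USAGE = 0.90;  // cf. hive.hashtable.max.memory.usage
      private final MemoryMXBean mxBean = ManagementFactory.getMemoryMXBean();
      private long rowNumber = 0;

      // Returns true once the used/max heap ratio passes the limit; the ratio
      // is sampled only every SCALE rows, exactly like the operator above.
      boolean shouldAbort() {
        rowNumber++;
        if (rowNumber > SCALE && rowNumber % SCALE == 0) {
          double rate = (double) mxBean.getHeapMemoryUsage().getUsed()
              / (double) mxBean.getHeapMemoryUsage().getMax();
          return rate > MAX_USAGE;
        }
        return false;
      }
    }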
+ private void setValueMetaData(int tag) throws SerDeException{ TableDesc valueTableDesc = conf.getValueTblFilteredDescs().get(tag); SerDe valueSerDe = (SerDe) ReflectionUtils.newInstance(valueTableDesc + .getDeserializerClass(), null); + + valueSerDe.initialize(null, valueTableDesc.getProperties()); + + List newFields = rowContainerStandardObjectInspectors.get((Byte)alias); + int length = newFields.size(); + List newNames = new ArrayList(length); + for (int i = 0; i> hashTables : mapJoinTables - .entrySet()) { - // get the key and value + for (Map.Entry> hashTables : mapJoinTables.entrySet()) { + //get the key and value Byte tag = hashTables.getKey(); - HashMapWrapper hashTable = hashTables - .getValue(); + HashMapWrapper hashTable = hashTables.getValue(); - // get current input file name - String bigBucketFileName = this.getExecContext() - .getCurrentBigBucketFile(); - if (bigBucketFileName == null || bigBucketFileName.length() == 0) { - bigBucketFileName = "-"; + //get current input file name + String bigBucketFileName = this.getExecContext().getCurrentBigBucketFile(); + if(bigBucketFileName == null ||bigBucketFileName.length()==0) { + bigBucketFileName="-"; } - // get the tmp URI path; it will be a hdfs path if not local mode - String tmpURIPath = Utilities.generatePath(tmpURI, tag, - bigBucketFileName); - console.printInfo(Utilities.now() - + "\tDump the hashtable into file: " + tmpURIPath); - // get the hashtable file and path + //get the tmp URI path; it will be a hdfs path if not local mode + String tmpURIPath = PathUtil.generatePath(tmpURI, tag, bigBucketFileName); + hashTable.isAbort(rowNumber,console); + console.printInfo(Utilities.now()+"\t Dump the hashtable into file: "+tmpURIPath); + //get the hashtable file and path Path path = new Path(tmpURIPath); FileSystem fs = path.getFileSystem(hconf); File file = new File(path.toUri().getPath()); fs.create(path); + fileLength= hashTable.flushMemoryCacheToPersistent(file); + console.printInfo(Utilities.now()+"\t Upload 1 File to: "+tmpURIPath+" File size: "+fileLength); - fileLength = hashTable.flushMemoryCacheToPersistent(file); - - console.printInfo(Utilities.now() + "\t Processing rows: " - + rowNumber + "\t key number:" + hashTable.size()); - console.printInfo("Upload 1 File to: " + tmpURIPath + " File size: " - + fileLength); - hashTable.close(); } } super.closeOp(abort); - } catch (Exception e) { - LOG.error("Generate Hashtable error"); - e.printStackTrace(); - } + }catch(Exception e){ + LOG.error("Generate Hashtable error"); + e.printStackTrace(); + } } /** * Implements the getName function for the Node Interface. - * + * @return the name of the operator */ @Override @@ -424,4 +458,6 @@ return OperatorType.HASHTABLESINK; } + + } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java (working copy) @@ -34,6 +34,7 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalContext; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; import org.apache.hadoop.util.StringUtils; @@ -42,8 +43,7 @@ * Task implementation. 
**/ -public abstract class Task implements Serializable, - Node { +public abstract class Task implements Serializable, Node { private static final long serialVersionUID = 1L; protected transient boolean started; @@ -81,8 +81,7 @@ this.taskCounters = new HashMap(); } - public void initialize(HiveConf conf, QueryPlan queryPlan, - DriverContext driverContext) { + public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext driverContext) { this.queryPlan = queryPlan; isdone = false; started = false; @@ -103,8 +102,8 @@ } /** - * This method is called in the Driver on every task. It updates counters and - * calls execute(), which is overridden in each task + * This method is called in the Driver on every task. It updates counters and calls execute(), + * which is overridden in each task * * @return return value of execute() */ @@ -127,8 +126,7 @@ } /** - * This method is overridden in each Task. TODO execute should return a - * TaskHandle. + * This method is overridden in each Task. TODO execute should return a TaskHandle. * * @return status of executing the task */ @@ -161,9 +159,8 @@ } /** - * The default dependent tasks are just child tasks, but different types - * could implement their own (e.g. ConditionalTask will use the listTasks - * as dependents). + * The default dependent tasks are just child tasks, but different types could implement their own + * (e.g. ConditionalTask will use the listTasks as dependents). * * @return a list of tasks that are dependent on this task. */ @@ -172,8 +169,8 @@ } /** - * Add a dependent task on the current task. Return if the dependency already - * existed or is this a new one + * Add a dependent task on the current task. Return if the dependency already existed or is this a + * new one * * @return true if the task got added false if it already existed */ @@ -204,8 +201,7 @@ public void removeDependentTask(Task dependent) { if ((getChildTasks() != null) && (getChildTasks().contains(dependent))) { getChildTasks().remove(dependent); - if ((dependent.getParentTasks() != null) - && (dependent.getParentTasks().contains(this))) { + if ((dependent.getParentTasks() != null) && (dependent.getParentTasks().contains(this))) { dependent.getParentTasks().remove(this); } } @@ -279,6 +275,10 @@ return false; } + public boolean isMapRedLocalTask() { + return false; + } + public boolean hasReduce() { return false; } @@ -288,8 +288,7 @@ } /** - * Should be overridden to return the type of the specific task among the - * types in TaskType. + * Should be overridden to return the type of the specific task among the types in TaskType. * * @return TaskTypeType.* or -1 if not overridden */ @@ -299,21 +298,23 @@ } /** - * If this task uses any map-reduce intermediate data (either for reading - * or for writing), localize them (using the supplied Context). Map-Reduce - * intermediate directories are allocated using Context.getMRTmpFileURI() - * and can be localized using localizeMRTmpFileURI(). + * If this task uses any map-reduce intermediate data (either for reading or for writing), + * localize them (using the supplied Context). Map-Reduce intermediate directories are allocated + * using Context.getMRTmpFileURI() and can be localized using localizeMRTmpFileURI(). * - * This method is declared abstract to force any task code to explicitly - * deal with this aspect of execution. + * This method is declared abstract to force any task code to explicitly deal with this aspect of + * execution. 
* - * @param ctx context object with which to localize + * @param ctx + * context object with which to localize */ abstract protected void localizeMRTmpFilesImpl(Context ctx); /** * Localize a task tree - * @param ctx context object with which to localize + * + * @param ctx + * context object with which to localize */ public final void localizeMRTmpFiles(Context ctx) { localizeMRTmpFilesImpl(ctx); @@ -322,7 +323,7 @@ return; } - for (Task t: childTasks) { + for (Task t : childTasks) { t.localizeMRTmpFiles(ctx); } } @@ -330,12 +331,13 @@ /** * Subscribe the feed of publisher. To prevent cycles, a task can only subscribe to its ancestor. * Feed is a generic form of execution-time feedback (type, value) pair from one task to another - * task. Examples include dynamic partitions (which are only available at execution time). - * The MoveTask may pass the list of dynamic partitions to the StatsTask since after the - * MoveTask the list of dynamic partitions are lost (MoveTask moves them to the table's - * destination directory which is mixed with old partitions). + * task. Examples include dynamic partitions (which are only available at execution time). The + * MoveTask may pass the list of dynamic partitions to the StatsTask since after the MoveTask the + * list of dynamic partitions are lost (MoveTask moves them to the table's destination directory + * which is mixed with old partitions). * - * @param publisher this feed provider. + * @param publisher + * this feed provider. */ public void subscribeFeed(Task publisher) { if (publisher != this && publisher.ancestorOrSelf(this)) { @@ -353,7 +355,7 @@ } List> deps = getDependentTasks(); if (deps != null) { - for (Task d: deps) { + for (Task d : deps) { if (d.ancestorOrSelf(desc)) { return true; } @@ -373,7 +375,7 @@ // push the feed to its subscribers protected void pushFeed(FeedType feedType, Object feedValue) { if (feedSubscribers != null) { - for (Task s: feedSubscribers) { + for (Task s : feedSubscribers) { s.receiveFeed(feedType, feedValue); } } @@ -383,10 +385,38 @@ protected void receiveFeed(FeedType feedType, Object feedValue) { } - protected void cloneConf () { + protected void cloneConf() { if (!clonedConf) { clonedConf = true; conf = new HiveConf(conf); } } -} \ No newline at end of file + + public void replaceWithConditionalTask(ConditionalTask cndTsk, PhysicalContext physicalContext) { + // add this task into task tree + // set all parent tasks + List> parentTasks = this.getParentTasks(); + this.setParentTasks(null); + if (parentTasks != null) { + for (Task tsk : parentTasks) { + // make the newly generated task depend on all the parent tasks of the current task. 
+ tsk.addDependentTask(cndTsk); + // remove the current task from its original parent task's dependent task + tsk.removeDependentTask(this); + } + } else { + // remove from current root task and add conditional task to root tasks + physicalContext.removeFromRootTask(this); + physicalContext.addToRootTask(cndTsk); + } + // set all child tasks + List> oldChildTasks = this.getChildTasks(); + if (oldChildTasks != null) { + for (Task tsk : cndTsk.getListTasks()) { + for (Task oldChild : oldChildTasks) { + tsk.addDependentTask(oldChild); + } + } + } + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnInfo.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnInfo.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnInfo.java (working copy) @@ -49,7 +49,7 @@ private boolean isVirtualCol; private transient TypeInfo type; - + private boolean isHiddenVirtualCol; public ColumnInfo() { @@ -66,7 +66,7 @@ .getPrimitiveTypeInfoFromPrimitiveWritable(type), tabAlias, isVirtualCol, false); } - + public ColumnInfo(String internalName, TypeInfo type, String tabAlias, boolean isVirtualCol, boolean isHiddenVirtualCol) { this.internalName = internalName; @@ -99,7 +99,7 @@ public boolean getIsVirtualCol() { return isVirtualCol; } - + public boolean isHiddenVirtualCol() { return isHiddenVirtualCol; } @@ -120,4 +120,17 @@ return alias; } + public void setTabAlias(String tabAlias) { + this.tabAlias = tabAlias; + } + + public void setVirtualCol(boolean isVirtualCol) { + this.isVirtualCol = isVirtualCol; + } + + public void setHiddenVirtualCol(boolean isHiddenVirtualCol) { + this.isHiddenVirtualCol = isHiddenVirtualCol; + } + + } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java (working copy) @@ -28,17 +28,17 @@ import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Map.Entry; import java.util.Set; import java.util.TreeMap; +import java.util.Map.Entry; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.ExplainWork; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.hive.ql.DriverContext; -import org.apache.hadoop.hive.ql.Context; /** @@ -304,7 +304,7 @@ return; } dependeciesTaskSet.add(task); - + boolean first = true; out.print(indentString(indent)); out.printf("%s", task.getId()); if ((task.getParentTasks() == null || task.getParentTasks().isEmpty())) { @@ -313,7 +313,7 @@ } } else { out.print(" depends on stages: "); - boolean first = true; + first = true; for (Task parent : task.getParentTasks()) { if (!first) { out.print(", "); @@ -321,22 +321,23 @@ first = false; out.print(parent.getId()); } + } - if (task instanceof ConditionalTask - && ((ConditionalTask) task).getListTasks() != null) { - out.print(" , consists of "); - first = true; - for (Task con : ((ConditionalTask) task) - .getListTasks()) { - if (!first) { - out.print(", "); - } - first = false; - out.print(con.getId()); + if (task instanceof ConditionalTask + && ((ConditionalTask) task).getListTasks() != null) { + out.print(" , consists of "); + first = true; + 
for (Task con : ((ConditionalTask) task) + .getListTasks()) { + if (!first) { + out.print(", "); } + first = false; + out.print(con.getId()); } + } - } + out.println(); if (task instanceof ConditionalTask @@ -406,7 +407,7 @@ @Override protected void localizeMRTmpFilesImpl(Context ctx) { - // explain task has nothing to localize + // explain task has nothing to localize // we don't expect to enter this code path at all throw new RuntimeException ("Unexpected call"); } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java (working copy) @@ -32,13 +32,13 @@ /** * Conditional Task implementation. */ -public class ConditionalTask extends Task implements - Serializable { +public class ConditionalTask extends Task implements Serializable { private static final long serialVersionUID = 1L; private List> listTasks; private boolean resolved = false; + private List> resTasks; private ConditionalResolver resolver; @@ -48,6 +48,7 @@ super(); } + @Override public boolean isMapRedTask() { for (Task task : listTasks) { @@ -71,8 +72,7 @@ } @Override - public void initialize(HiveConf conf, QueryPlan queryPlan, - DriverContext driverContext) { + public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext driverContext) { super.initialize(conf, queryPlan, driverContext); } @@ -80,15 +80,35 @@ public int execute(DriverContext driverContext) { resTasks = resolver.getTasks(conf, resolverCtx); resolved = true; + Task backupTask = resolver.getBackUpTask(conf, resolverCtx); + List> backupChildren = null; + if (backupTask != null) { + backupChildren = backupTask.getChildTasks(); + } + for (Task tsk : getListTasks()) { if (!resTasks.contains(tsk)) { driverContext.getRunnable().remove(tsk); - console.printInfo(ExecDriver.getJobEndMsg("" - + Utilities.randGen.nextInt()) + console.printInfo(ExecDriver.getJobEndMsg("" + Utilities.randGen.nextInt()) + ", job is filtered out (removed at runtime)."); - if(tsk.isMapRedTask()) { + if (tsk.isMapRedTask()) { driverContext.incCurJobNo(1); + } else if (tsk.isMapRedLocalTask()) { + List> taskList = tsk.getChildTasks(); + + for(Task childTask : taskList) { + if (childTask.getChildTasks() != null) { + for (Task grandChildTask : childTask.getChildTasks()) { + grandChildTask.parentTasks.remove(childTask); + if (DriverContext.isLaunchable(grandChildTask)) { + driverContext.addToRunnable(grandChildTask); + } + } + } + } + continue; } + if (tsk.getChildTasks() != null) { for (Task child : tsk.getChildTasks()) { child.parentTasks.remove(tsk); @@ -97,13 +117,21 @@ } } } - } else if (!driverContext.getRunnable().contains(tsk)) { - driverContext.addToRunnable(tsk); + } else { + // resolved task + if (!driverContext.getRunnable().contains(tsk)) { + driverContext.addToRunnable(tsk); + if (backupTask != null) { + driverContext.addBackUpTaskMapping(tsk, backupTask); + driverContext.addBackUpChildrenTasks(backupTask, backupChildren); + } + } } } return 0; } + /** * @return the resolver */ @@ -184,9 +212,8 @@ } /** - * Add a dependent task on the current conditional task. The task will not be - * a direct child of conditional task. Actually it will be added as child task - * of associated tasks. + * Add a dependent task on the current conditional task. The task will not be a direct child of + * conditional task. 
Actually it will be added as child task of associated tasks. * * @return true if the task got added false if it already existed */ @@ -204,7 +231,7 @@ @Override protected void localizeMRTmpFilesImpl(Context ctx) { if (getListTasks() != null) { - for(Task t: getListTasks()) { + for (Task t : getListTasks()) { t.localizeMRTmpFiles(ctx); } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapper.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapper.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapper.java (working copy) @@ -58,7 +58,6 @@ private long numRows = 0; private long nextCntr = 1; private MapredLocalWork localWork = null; - private boolean isLogInfoEnabled = false; private final ExecMapperContext execContext = new ExecMapperContext(); @@ -68,8 +67,6 @@ memoryMXBean = ManagementFactory.getMemoryMXBean(); l4j.info("maximum memory = " + memoryMXBean.getHeapMemoryUsage().getMax()); - isLogInfoEnabled = l4j.isInfoEnabled(); - try { l4j.info("conf classpath = " + Arrays.asList(((URLClassLoader) job.getClassLoader()).getURLs())); @@ -89,7 +86,7 @@ // initialize map operator mo.setChildren(job); l4j.info(mo.dump(0)); - // initialize map local work + // initialize map local work localWork = mrwork.getMapLocalWork(); execContext.setLocalWork(localWork); @@ -100,7 +97,6 @@ if (localWork == null) { return; } - //The following code is for mapjoin //initialize all the dummy ops l4j.info("Initializing dummy operator"); @@ -121,6 +117,7 @@ throw new RuntimeException("Map operator initialization failed", e); } } + } public void map(Object key, Object value, OutputCollector output, @@ -141,7 +138,7 @@ // Since there is no concept of a group, we don't invoke // startGroup/endGroup for a mapper mo.process((Writable)value); - if (isLogInfoEnabled) { + if (l4j.isInfoEnabled()) { numRows++; if (numRows == nextCntr) { long used_memory = memoryMXBean.getHeapMemoryUsage().getUsed(); @@ -210,7 +207,7 @@ } } - if (isLogInfoEnabled) { + if (l4j.isInfoEnabled()) { long used_memory = memoryMXBean.getHeapMemoryUsage().getUsed(); l4j.info("ExecMapper: processed " + numRows + " rows: used memory = " + used_memory); @@ -244,6 +241,8 @@ ExecMapper.done = done; } + + /** * reportStats. 
* Index: ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java (working copy) @@ -73,8 +73,9 @@ taskvec.add(new taskTuple(MapredLocalWork.class, MapredLocalTask.class)); taskvec.add(new taskTuple(StatsWork.class, - StatsTask.class)); + StatsTask.class)); + } private static ThreadLocal tid = new ThreadLocal() { Index: ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java (working copy) @@ -409,4 +409,7 @@ return null; } + + + } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java (working copy) @@ -33,6 +33,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; @@ -43,20 +44,20 @@ * the main memory hash table exceeds a certain threshold, new elements will go into the persistent * hash table. */ + public class HashMapWrapper implements Serializable { + private static final long serialVersionUID = 1L; protected Log LOG = LogFactory.getLog(this.getClass().getName()); // default threshold for using main memory based HashMap + private static final int THRESHOLD = 1000000; private static final float LOADFACTOR = 0.75f; + private static final float MEMORYUSAGE = 1; - private double threshold; // threshold to put data into persistent hash table - // instead + private float maxMemoryUsage; private HashMap mHash; // main memory HashMap - - - protected transient LogHelper console; private File dumpFile; @@ -71,10 +72,9 @@ * @param threshold * User specified threshold to store new values into persistent storage. 
*/ - public HashMapWrapper(int threshold, float loadFactor) { - this.threshold = 0.9; + public HashMapWrapper(int threshold, float loadFactor, float memoryUsage) { + maxMemoryUsage = memoryUsage; mHash = new HashMap(threshold, loadFactor); - console = new LogHelper(LOG); memoryMXBean = ManagementFactory.getMemoryMXBean(); maxMemory = memoryMXBean.getHeapMemoryUsage().getMax(); LOG.info("maximum memory: " + maxMemory); @@ -83,30 +83,28 @@ } public HashMapWrapper(int threshold) { - this(THRESHOLD, 0.75f); + this(threshold, LOADFACTOR, MEMORYUSAGE); } public HashMapWrapper() { - this(THRESHOLD, LOADFACTOR); + this(THRESHOLD, LOADFACTOR, MEMORYUSAGE); } - public V get(K key) { return mHash.get(key); } - public boolean put(K key, V value) throws HiveException { // isAbort(); mHash.put(key, value); return false; } + public void remove(K key) { mHash.remove(key); } - /** * Flush the main memory hash table into the persistent cache file * @@ -146,7 +144,6 @@ * @throws HiveException */ public void close() throws HiveException { - // isAbort(); mHash.clear(); } @@ -158,36 +155,25 @@ return mHash.size(); } - private boolean isAbort() { - int size = mHash.size(); - // if(size >= 1000000 && size % 1000000 == 0 ){ + public boolean isAbort(long numRows,LogHelper console) { System.gc(); System.gc(); + int size = mHash.size(); long usedMemory = memoryMXBean.getHeapMemoryUsage().getUsed(); double rate = (double) usedMemory / (double) maxMemory; long mem1 = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory(); - console.printInfo("Hashtable size:\t" + size + "\tMemory usage:\t" + usedMemory + "\t rate:\t" - + num.format(rate)); - return true; - + console.printInfo(Utilities.now() + "\t Processing rows:\t" + numRows + "\tHashtable size:\t" + + size + "\tMemory usage:\t" + usedMemory + "\t rate:\t" + num.format(rate)); + if (rate > (double) maxMemoryUsage) { + return true; + } + return false; } - public Log getLOG() { - return LOG; - } - public void setLOG(Log log) { LOG = log; } - public double getThreshold() { - return threshold; - } - - public void setThreshold(double threshold) { - this.threshold = threshold; - } - public HashMap getMHash() { return mHash; }
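The rewritten wrapper no longer spills to a persistent table on a size threshold; isAbort(numRows, console) forces two GCs, logs the row count and the used/max heap ratio, and reports abort once that ratio exceeds the constructor's memoryUsage argument. A hedged sketch of how a caller sizes it; the key/value type parameters were stripped by extraction in the hunk above and are assumed here, and the constants are illustrative values for the hive.hashtable.* settings:

    HashMapWrapper<AbstractMapJoinKey, MapJoinObjectValue> table =
        new HashMapWrapper<AbstractMapJoinKey, MapJoinObjectValue>(
            100000,   // hive.hashtable.threshold: initial HashMap capacity
            0.75f,    // hive.hashtable.loadfactor
            0.90f);   // hive.hashtable.max.memory.usage: abort above this heap ratio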
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/MapredLocalTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/MapredLocalTask.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapredLocalTask.java (working copy) @@ -57,7 +57,7 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.util.ReflectionUtils; -public class MapredLocalTask extends Task implements Serializable { +public class MapredLocalTask extends Task implements Serializable { private Map fetchOperators; private JobConf job; @@ -67,11 +67,11 @@ static final String[] HIVE_SYS_PROP = {"build.dir", "build.dir.hive"}; public static MemoryMXBean memoryMXBean; - // not sure we need this exec context; but all the operators in the work + // not sure we need this exec context; but all the operators in the work // will pass this context through private final ExecMapperContext execContext = new ExecMapperContext(); - public MapredLocalTask(){ + public MapredLocalTask() { super(); } @@ -83,25 +83,23 @@ } @Override - public void initialize(HiveConf conf, QueryPlan queryPlan, - DriverContext driverContext) { + public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext driverContext) { super.initialize(conf, queryPlan, driverContext); job = new JobConf(conf, ExecDriver.class); } - public static String now(){ + public static String now() { Calendar cal = Calendar.getInstance(); SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); - return sdf.format(cal.getTime()); + return sdf.format(cal.getTime()); } @Override - public int execute(DriverContext driverContext){ - try{ - //generate the cmd line to run in the child jvm - //String hadoopExec = conf.getVar(HiveConf.ConfVars.HADOOPBIN); + public int execute(DriverContext driverContext) { + try { + // generate the cmd line to run in the child jvm Context ctx = driverContext.getCtx(); String hiveJar = conf.getJar(); @@ -115,16 +113,15 @@ LOG.info("Generating plan file " + planPath.toString()); Utilities.serializeMapRedLocalWork(plan, out); - String isSilent = "true".equalsIgnoreCase(System - .getProperty("test.silent")) ? "-nolog" : ""; + String isSilent = "true".equalsIgnoreCase(System.getProperty("test.silent")) ? "-nolog" : ""; String jarCmd; - jarCmd = hiveJar + " " + ExecDriver.class.getName() ; + jarCmd = hiveJar + " " + ExecDriver.class.getName(); String hiveConfArgs = ExecDriver.generateCmdLine(conf); - String cmdLine = hadoopExec + " jar " + jarCmd + " -localtask -plan " - + planPath.toString() + " " + isSilent + " " + hiveConfArgs; + String cmdLine = hadoopExec + " jar " + jarCmd + " -localtask -plan " + planPath.toString() + + " " + isSilent + " " + hiveConfArgs; String workDir = (new File(".")).getCanonicalPath(); String files = ExecDriver.getResourceFiles(conf, SessionState.ResourceType.FILE); @@ -134,16 +131,16 @@ workDir = (new Path(ctx.getLocalTmpFileURI())).toUri().getPath(); - if (! (new File(workDir)).mkdir()) { - throw new IOException ("Cannot create tmp working dir: " + workDir); + if (!(new File(workDir)).mkdir()) { + throw new IOException("Cannot create tmp working dir: " + workDir); } - for (String f: StringUtils.split(files, ',')) { + for (String f : StringUtils.split(files, ',')) { Path p = new Path(f); String target = p.toUri().getPath(); String link = workDir + Path.SEPARATOR + p.getName(); if (FileUtil.symLink(target, link) != 0) { - throw new IOException ("Cannot link to added file: " + target + " from: " + link); + throw new IOException("Cannot link to added file: " + target + " from: " + link); } } } @@ -166,31 +163,31 @@ Map variables = new HashMap(System.getenv()); // The user can specify the hadoop memory - //if ("local".equals(conf.getVar(HiveConf.ConfVars.HADOOPJT))) { - // if we are running in local mode - then the amount of memory used - // by the child jvm can no longer default to the memory used by the - // parent jvm - //int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVEHADOOPMAXMEM); + // if ("local".equals(conf.getVar(HiveConf.ConfVars.HADOOPJT))) { + // if we are running in local mode - then the amount of memory used + // by the child jvm can no longer default to the memory used by the + // parent jvm + // int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVEHADOOPMAXMEM); - int hadoopMem= conf.getIntVar(HiveConf.ConfVars.HIVEHADOOPMAXMEM);; + int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVEHADOOPMAXMEM); if (hadoopMem == 0) { // remove env var that would default child jvm to use parent's memory // as default. 
child jvm would use default memory for a hadoop client variables.remove(HADOOP_MEM_KEY); } else { // user specified the memory for local mode hadoop run - console.printInfo(" set heap size\t"+hadoopMem+"MB"); + console.printInfo(" set heap size\t" + hadoopMem + "MB"); variables.put(HADOOP_MEM_KEY, String.valueOf(hadoopMem)); } - //} else { - // nothing to do - we are not running in local mode - only submitting - // the job via a child process. in this case it's appropriate that the - // child jvm use the same memory as the parent jvm + // } else { + // nothing to do - we are not running in local mode - only submitting + // the job via a child process. in this case it's appropriate that the + // child jvm use the same memory as the parent jvm - //} + // } if (variables.containsKey(HADOOP_OPTS_KEY)) { - variables.put(HADOOP_OPTS_KEY, variables.get(HADOOP_OPTS_KEY) - + hadoopOpts); + variables.put(HADOOP_OPTS_KEY, variables.get(HADOOP_OPTS_KEY) + hadoopOpts); } else { variables.put(HADOOP_OPTS_KEY, hadoopOpts); } @@ -205,10 +202,8 @@ // Run ExecDriver in another JVM executor = Runtime.getRuntime().exec(cmdLine, env, new File(workDir)); - StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), - null, System.out); - StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), - null, System.err); + StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, System.out); + StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, System.err); outPrinter.start(); errPrinter.start(); @@ -217,7 +212,7 @@ if (exitVal != 0) { LOG.error("Execution failed with exit status: " + exitVal); - console.printError("Mapred Local Task Failed. Give up the map join stragery"); + console.printInfo("Mapred Local Task Failed. Give up the map join strategy"); } else { LOG.info("Execution completed successfully"); console.printInfo("Mapred Local Task Running Successfully. 
Keep using map join strategy"); } @@ -233,54 +228,56 @@ - public int executeFromChildJVM(DriverContext driverContext){ - + public int executeFromChildJVM(DriverContext driverContext) { // check the local work - if(work == null){ + if (work == null) { return -1; } memoryMXBean = ManagementFactory.getMemoryMXBean(); - console.printInfo(Utilities.now()+"\tStarting to launch local task to process map join "); + console.printInfo(Utilities.now() + "\tStarting to launch local task to process map join "); console.printInfo("\tmaximum memory = " + memoryMXBean.getHeapMemoryUsage().getMax()); fetchOperators = new HashMap(); Map fetchOpJobConfMap = new HashMap(); execContext.setJc(job); - //set the local work, so all the operator can get this context + // set the local work, so all the operator can get this context execContext.setLocalWork(work); boolean inputFileChangeSenstive = work.getInputFileChangeSensitive(); - try{ + try { initializeOperators(fetchOpJobConfMap); - //for each big table's bucket, call the start forward - if(inputFileChangeSenstive){ - for( LinkedHashMap> bigTableBucketFiles: - work.getBucketMapjoinContext().getAliasBucketFileNameMapping().values()){ - for(String bigTableBucket: bigTableBucketFiles.keySet()){ - startForward(inputFileChangeSenstive,bigTableBucket); + // for each big table's bucket, call the start forward + if (inputFileChangeSenstive) { + for (LinkedHashMap> bigTableBucketFiles : work + .getBucketMapjoinContext().getAliasBucketFileNameMapping().values()) { + for (String bigTableBucket : bigTableBucketFiles.keySet()) { + startForward(inputFileChangeSenstive, bigTableBucket); } } - }else{ - startForward(inputFileChangeSenstive,null); + } else { + startForward(inputFileChangeSenstive, null); } - console.printInfo(now()+"\tEnd of local task "); + console.printInfo(Utilities.now() + "\tEnd of local task "); } catch (Throwable e) { - if (e instanceof OutOfMemoryError) { + if (e instanceof OutOfMemoryError + || (e instanceof HiveException && e.getMessage().equals("RunOutOfMemoryUsage"))) { // Don't create a new object if we are already out of memory l4j.error("Out of Memory Error"); - console.printError("[Warning] Small table is too large to put into memory"); - return 2; + console + .printInfo("[Warning] HashTableSink Operator Failed. The memory usage is more than the configured limit; Backup Task will be launched."); + return 3; } else { l4j.error("Hive Runtime Error: Map local work failed"); e.printStackTrace(); + return 2; } - }finally{ - console.printInfo(Utilities.now()+"\tFinish running local task"); + } finally { + console.printInfo(Utilities.now() + "\tFinish running local task"); } return 0; }
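executeFromChildJVM effectively turns the child JVM's exit status into a small protocol: 0 means the hash tables were dumped, 3 means the memory guard tripped, and 2 (or anything else) means a genuine failure. A sketch of the driver-side reading of that contract; the branch comments are interpretation, not code from the patch:

    int exitVal = executor.waitFor();   // child JVM ran ExecDriver -localtask
    if (exitVal == 0) {
      // hash tables built and dumped: continue with the map-join plan
    } else if (exitVal == 3) {
      // memory abort from HashTableSinkOperator: the ConditionalTask is
      // expected to fall back to the registered backup (common join) task
    } else {
      // exitVal == 2 or anything else: genuine failure of the local work
    }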
private void startForward(boolean inputFileChangeSenstive, String bigTableBucket) - throws Exception{ + throws Exception { for (Map.Entry entry : fetchOperators.entrySet()) { int fetchOpRows = 0; String alias = entry.getKey(); FetchOperator fetchOp = entry.getValue(); @@ -288,17 +285,17 @@ if (inputFileChangeSenstive) { fetchOp.clearFetchContext(); - setUpFetchOpContext(fetchOp, alias,bigTableBucket); + setUpFetchOpContext(fetchOp, alias, bigTableBucket); } - //get the root operator + // get the root operator Operator forwardOp = work.getAliasToWork().get(alias); - //walk through the operator tree + // walk through the operator tree while (true) { InspectableObject row = fetchOp.getNextRow(); if (row == null) { if (inputFileChangeSenstive) { - String fileName=this.getFileName(bigTableBucket); + String fileName = this.getFileName(bigTableBucket); execContext.setCurrentBigBucketFile(fileName); forwardOp.reset(); } @@ -310,22 +307,23 @@ // check if any operator had a fatal error or early exit during // execution if (forwardOp.getDone()) { - //ExecMapper.setDone(true); + // ExecMapper.setDone(true); break; } } } } + private void initializeOperators(Map fetchOpJobConfMap) - throws HiveException{ + throws HiveException { // this mapper operator is used to initialize all the operators for (Map.Entry entry : work.getAliasToFetchWork().entrySet()) { JobConf jobClone = new JobConf(job); Operator tableScan = work.getAliasToWork().get(entry.getKey()); boolean setColumnsNeeded = false; - if(tableScan instanceof TableScanOperator) { - ArrayList list = ((TableScanOperator)tableScan).getNeededColumnIDs(); + if (tableScan instanceof TableScanOperator) { + ArrayList list = ((TableScanOperator) tableScan).getNeededColumnIDs(); if (list != null) { ColumnProjectionUtils.appendReadColumnIDs(jobClone, list); setColumnsNeeded = true; @@ -336,18 +334,18 @@ ColumnProjectionUtils.setFullyReadColumns(jobClone); } - //create a fetch operator - FetchOperator fetchOp = new FetchOperator(entry.getValue(),jobClone); + // create a fetch operator + FetchOperator fetchOp = new FetchOperator(entry.getValue(), jobClone); fetchOpJobConfMap.put(fetchOp, jobClone); fetchOperators.put(entry.getKey(), fetchOp); l4j.info("fetchoperator for " + entry.getKey() + " created"); } - //initilize all forward operator + // initialize all forward operators for (Map.Entry entry : fetchOperators.entrySet()) { - //get the forward op + // get the forward op Operator forwardOp = work.getAliasToWork().get(entry.getKey()); - //put the exe context into all the operators + // put the exec context into all the operators forwardOp.setExecContext(execContext); // All the operators need to be initialized before process FetchOperator fetchOp = entry.getValue(); @@ -356,54 +354,58 @@ if (jobConf == null) { jobConf = job; } - //initialize the forward operator + // initialize the forward operator forwardOp.initialize(jobConf, new ObjectInspector[] {fetchOp.getOutputObjectInspector()}); l4j.info("fetchoperator for " + entry.getKey() + " initialized"); } } - private void setUpFetchOpContext(FetchOperator fetchOp, String alias,String currentInputFile) - throws Exception { + private void setUpFetchOpContext(FetchOperator fetchOp, String alias, 
String currentInputFile) + throws Exception { - BucketMapJoinContext bucketMatcherCxt = this.work - .getBucketMapjoinContext(); + BucketMapJoinContext bucketMatcherCxt = this.work.getBucketMapjoinContext(); - Class bucketMatcherCls = bucketMatcherCxt - .getBucketMatcherClass(); - BucketMatcher bucketMatcher = (BucketMatcher) ReflectionUtils.newInstance( - bucketMatcherCls, null); - bucketMatcher.setAliasBucketFileNameMapping(bucketMatcherCxt - .getAliasBucketFileNameMapping()); + Class bucketMatcherCls = bucketMatcherCxt.getBucketMatcherClass(); + BucketMatcher bucketMatcher = (BucketMatcher) ReflectionUtils.newInstance(bucketMatcherCls, + null); + bucketMatcher.setAliasBucketFileNameMapping(bucketMatcherCxt.getAliasBucketFileNameMapping()); - List aliasFiles = bucketMatcher.getAliasBucketFiles(currentInputFile, - bucketMatcherCxt.getMapJoinBigTableAlias(), alias); + List aliasFiles = bucketMatcher.getAliasBucketFiles(currentInputFile, bucketMatcherCxt + .getMapJoinBigTableAlias(), alias); Iterator iter = aliasFiles.iterator(); fetchOp.setupContext(iter, null); } - private String getFileName(String path){ - if(path== null || path.length()==0) { + private String getFileName(String path) { + if (path == null || path.length() == 0) { return null; } - int last_separator = path.lastIndexOf(Path.SEPARATOR)+1; + int last_separator = path.lastIndexOf(Path.SEPARATOR) + 1; String fileName = path.substring(last_separator); return fileName; } + @Override - public void localizeMRTmpFilesImpl(Context ctx){ + public void localizeMRTmpFilesImpl(Context ctx) { } @Override + public boolean isMapRedLocalTask() { + return true; + } + + @Override public String getName() { return "MAPREDLOCAL"; } + @Override public int getType() { - //assert false; + // assert false; return StageType.MAPREDLOCAL; } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java (working copy) @@ -194,12 +194,54 @@ * Returns an operator given the conf and a list of parent operators. */ public static Operator getAndMakeChild(T conf, + List> oplist) { + Operator ret = get((Class) conf.getClass()); + ret.setConf(conf); + if (oplist.size() == 0) { + return (ret); + } + + // Add the new operator as child of each of the passed in operators + for (Operator op : oplist) { + List children = op.getChildOperators(); + if (children == null) { + children = new ArrayList(); + } + children.add(ret); + op.setChildOperators(children); + } + + // add parents for the newly created operator + List> parent = new ArrayList>(); + for (Operator op : oplist) { + parent.add(op); + } + + ret.setParentOperators(parent); + + return (ret); + } + + /** + * Returns an operator given the conf and a list of parent operators. + */ + public static Operator getAndMakeChild(T conf, RowSchema rwsch, Operator... oplist) { Operator ret = getAndMakeChild(conf, oplist); ret.setSchema(rwsch); return (ret); } + /** + * Returns an operator given the conf and a list of parent operators. 
+ */ + public static Operator getAndMakeChild(T conf, + RowSchema rwsch, List> oplist) { + Operator ret = getAndMakeChild(conf, oplist); + ret.setSchema(rwsch); + return (ret); + } + private OperatorFactory() { // prevent instantiation } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/PathUtil.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/PathUtil.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/PathUtil.java (revision 0) @@ -0,0 +1,20 @@ +package org.apache.hadoop.hive.ql.exec; + +import org.apache.hadoop.fs.Path; + +public class PathUtil { + public static final String suffix = ".hashtable"; + public static String generatePath(String baseURI, Byte tag, String bigBucketFileName) { + return baseURI + Path.SEPARATOR + "-" + tag + "-" + bigBucketFileName + suffix; + } + public static String generateFileName(Byte tag, String bigBucketFileName) { + return "-" + tag + "-" + bigBucketFileName + suffix; + } + + public static String generateTmpURI(String baseURI, String id) { + return baseURI + Path.SEPARATOR + "HashTable-" + id; + } +}
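PathUtil centralizes the naming scheme for dumped hash table files. A short usage sketch with made-up arguments; only the three helpers themselves come from the new file:

    // yields "/tmp/hive/HashTable-42" as the per-query staging directory
    String tmpURI = PathUtil.generateTmpURI("/tmp/hive", "42");
    // yields "/tmp/hive/HashTable-42/-1-bucket_00000.hashtable"
    String dumpPath = PathUtil.generatePath(tmpURI, Byte.valueOf((byte) 1), "bucket_00000");
    // yields "-1-bucket_00000.hashtable"
    String fileName = PathUtil.generateFileName(Byte.valueOf((byte) 1), "bucket_00000");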
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java (working copy) @@ -111,8 +111,7 @@ super(); } - protected static String getResourceFiles(Configuration conf, - SessionState.ResourceType t) { + protected static String getResourceFiles(Configuration conf, SessionState.ResourceType t) { // fill in local files to be added to the task environment SessionState ss = SessionState.get(); Set files = (ss == null) ? null : ss.list_resource(t, null); @@ -122,8 +121,8 @@ try { realFiles.add(Utilities.realFile(one, conf)); } catch (IOException e) { - throw new RuntimeException("Cannot validate file " + one - + "due to exception: " + e.getMessage(), e); + throw new RuntimeException("Cannot validate file " + one + "due to exception: " + + e.getMessage(), e); } } return StringUtils.join(realFiles, ","); @@ -143,8 +142,7 @@ * Initialization when invoked from QL. */ @Override - public void initialize(HiveConf conf, QueryPlan queryPlan, - DriverContext driverContext) { + public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext driverContext) { super.initialize(conf, queryPlan, driverContext); job = new JobConf(conf, ExecDriver.class); @@ -164,8 +162,7 @@ if (StringUtils.isNotBlank(addedJars)) { HiveConf.setVar(job, ConfVars.HIVEADDEDJARS, addedJars); } - String addedArchives = getResourceFiles(job, - SessionState.ResourceType.ARCHIVE); + String addedArchives = getResourceFiles(job, SessionState.ResourceType.ARCHIVE); if (StringUtils.isNotBlank(addedArchives)) { HiveConf.setVar(job, ConfVars.HIVEADDEDARCHIVES, addedArchives); } @@ -182,24 +179,24 @@ } /** - * A list of the currently running jobs spawned in this Hive instance that is - * used to kill all running jobs in the event of an unexpected shutdown - - * i.e., the JVM shuts down while there are still jobs running. + * A list of the currently running jobs spawned in this Hive instance that is used to kill all + * running jobs in the event of an unexpected shutdown - i.e., the JVM shuts down while there are + * still jobs running. */ - private static Map runningJobKillURIs = - Collections.synchronizedMap(new HashMap()); + private static Map runningJobKillURIs = Collections + .synchronizedMap(new HashMap()); /** - * In Hive, when the user control-c's the command line, any running jobs - * spawned from that command line are best-effort killed. + * In Hive, when the user control-c's the command line, any running jobs spawned from that command + * line are best-effort killed. * - * This static constructor registers a shutdown thread to iterate over all the - * running job kill URLs and do a get on them. + * This static constructor registers a shutdown thread to iterate over all the running job kill + * URLs and do a get on them. * */ static { - if (new org.apache.hadoop.conf.Configuration().getBoolean( - "webinterface.private.actions", false)) { + if (new org.apache.hadoop.conf.Configuration() + .getBoolean("webinterface.private.actions", false)) { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { @@ -207,13 +204,13 @@ for (String uri : runningJobKillURIs.values()) { try { System.err.println("killing job with: " + uri); - java.net.HttpURLConnection conn = (java.net.HttpURLConnection) new java.net.URL( - uri).openConnection(); + java.net.HttpURLConnection conn = (java.net.HttpURLConnection) new java.net.URL(uri) + .openConnection(); conn.setRequestMethod("POST"); int retCode = conn.getResponseCode(); if (retCode != 200) { - System.err.println("Got an error trying to kill job with URI: " - + uri + " = " + retCode); + System.err.println("Got an error trying to kill job with URI: " + uri + " = " + + retCode); } } catch (Exception e) { System.err.println("trying to kill job, caught: " + e); @@ -235,22 +232,19 @@ } else { String hp = job.get("mapred.job.tracker"); if (SessionState.get() != null) { - SessionState.get().getHiveHistory().setTaskProperty( - SessionState.get().getQueryId(), getId(), Keys.TASK_HADOOP_ID, - rj.getJobID()); + SessionState.get().getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), + getId(), Keys.TASK_HADOOP_ID, rj.getJobID()); } - console.printInfo(ExecDriver.getJobStartMsg(rj.getJobID()) - + ", Tracking URL = " + rj.getTrackingURL()); - console.printInfo("Kill Command = " - + HiveConf.getVar(job, HiveConf.ConfVars.HADOOPBIN) + console.printInfo(ExecDriver.getJobStartMsg(rj.getJobID()) + ", Tracking URL = " + + rj.getTrackingURL()); + console.printInfo("Kill Command = " + HiveConf.getVar(job, HiveConf.ConfVars.HADOOPBIN) + " job -Dmapred.job.tracker=" + hp + " -kill " + rj.getJobID()); } } /** - * This class contains the state of the running task Going forward, we will - * return this handle from execute and Driver can split execute into start, - * monitorProgess and postProcess. + * This class contains the state of the running task Going forward, we will return this handle + * from execute and Driver can split execute into start, monitorProgess and postProcess. */ private static class ExecDriverTaskHandle extends TaskHandle { JobClient jc; @@ -280,16 +274,14 @@ } /** - * Fatal errors are those errors that cannot be recovered by retries. These - * are application dependent. Examples of fatal errors include: - the small - * table in the map-side joins is too large to be feasible to be handled by - * one mapper. The job should fail and the user should be warned to use - * regular joins rather than map-side joins. Fatal errors are indicated by - * counters that are set at execution time. If the counter is non-zero, a - * fatal error occurred. 
The value of the counter indicates the error type. + * Fatal errors are those errors that cannot be recovered by retries. These are application + * dependent. Examples of fatal errors include: - the small table in the map-side joins is too + * large to be feasible to be handled by one mapper. The job should fail and the user should be + * warned to use regular joins rather than map-side joins. Fatal errors are indicated by counters + * that are set at execution time. If the counter is non-zero, a fatal error occurred. The value + * of the counter indicates the error type. * - * @return true if fatal errors happened during job execution, false - * otherwise. + * @return true if fatal errors happened during job execution, false otherwise. */ private boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) { if (ctrs == null) { @@ -299,7 +291,7 @@ } // check for number of created files long numFiles = ctrs.getCounter(ProgressCounter.CREATED_FILES); - long upperLimit = HiveConf.getLongVar(job, HiveConf.ConfVars.MAXCREATEDFILES); + long upperLimit = HiveConf.getLongVar(job, HiveConf.ConfVars.MAXCREATEDFILES); if (numFiles > upperLimit) { errMsg.append("total number of created files exceeds ").append(upperLimit); return true; @@ -335,8 +327,7 @@ } catch (InterruptedException e) { } - if (initializing && - ShimLoader.getHadoopShims().isJobPreparing(rj)) { + if (initializing && ShimLoader.getHadoopShims().isJobPreparing(rj)) { // No reason to poll untill the job is initialized continue; } else { @@ -366,8 +357,7 @@ Counters ctrs = th.getCounters(); if (fatal = checkFatalErrors(ctrs, errMsg)) { - console.printError("[Fatal Error] " + errMsg.toString() - + ". Killing the job."); + console.printError("[Fatal Error] " + errMsg.toString() + ". Killing the job."); rj.killJob(); continue; } @@ -375,8 +365,8 @@ updateCounters(ctrs, rj); - String report = " " + getId() + " map = " + mapProgress + "%, reduce = " - + reduceProgress + "%"; + String report = " " + getId() + " map = " + mapProgress + "%, reduce = " + reduceProgress + + "%"; if (!report.equals(lastReport) || System.currentTimeMillis() >= reportTime + maxReportInterval) { @@ -386,12 +376,10 @@ String output = dateFormat.format(Calendar.getInstance().getTime()) + report; SessionState ss = SessionState.get(); if (ss != null) { - ss.getHiveHistory().setTaskCounters(SessionState.get().getQueryId(), - getId(), ctrs); - ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), - getId(), Keys.TASK_HADOOP_PROGRESS, output); - ss.getHiveHistory().progressTask(SessionState.get().getQueryId(), - this); + ss.getHiveHistory().setTaskCounters(SessionState.get().getQueryId(), getId(), ctrs); + ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(), + Keys.TASK_HADOOP_PROGRESS, output); + ss.getHiveHistory().progressTask(SessionState.get().getQueryId(), this); ss.getHiveHistory().logPlanProgress(queryPlan); } console.printInfo(output); @@ -411,7 +399,7 @@ if (checkFatalErrors(ctrs, errMsg)) { console.printError("[Fatal Error] " + errMsg.toString()); success = false; - } else { + } else { success = rj.isSuccessful(); } } @@ -434,10 +422,8 @@ private void updateCounters(Counters ctrs, RunningJob rj) throws IOException { mapProgress = Math.round(rj.mapProgress() * 100); reduceProgress = Math.round(rj.reduceProgress() * 100); - taskCounters.put("CNTR_NAME_" + getId() + "_MAP_PROGRESS", Long - .valueOf(mapProgress)); - taskCounters.put("CNTR_NAME_" + getId() + "_REDUCE_PROGRESS", Long - .valueOf(reduceProgress)); + 
taskCounters.put("CNTR_NAME_" + getId() + "_MAP_PROGRESS", Long.valueOf(mapProgress)); + taskCounters.put("CNTR_NAME_" + getId() + "_REDUCE_PROGRESS", Long.valueOf(reduceProgress)); if (ctrs == null) { // hadoop might return null if it cannot locate the job. // we may still be able to retrieve the job status - so ignore @@ -512,8 +498,8 @@ job.setMapOutputValueClass(BytesWritable.class); try { - job.setPartitionerClass((Class) - (Class.forName(HiveConf.getVar(job, HiveConf.ConfVars.HIVEPARTITIONER)))); + job.setPartitionerClass((Class) (Class.forName(HiveConf.getVar(job, + HiveConf.ConfVars.HIVEPARTITIONER)))); } catch (ClassNotFoundException e) { throw new RuntimeException(e.getMessage()); } @@ -522,8 +508,8 @@ job.setNumMapTasks(work.getNumMapTasks().intValue()); } if (work.getMinSplitSize() != null) { - HiveConf.setIntVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZE, - work.getMinSplitSize().intValue()); + HiveConf.setIntVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZE, work.getMinSplitSize() + .intValue()); } job.setNumReduceTasks(work.getNumReduceTasks().intValue()); job.setReducerClass(ExecReducer.class); @@ -533,11 +519,9 @@ } // Turn on speculative execution for reducers - boolean useSpeculativeExecReducers = - HiveConf.getBoolVar(job, HiveConf.ConfVars.HIVESPECULATIVEEXECREDUCERS); - HiveConf.setBoolVar( - job, - HiveConf.ConfVars.HADOOPSPECULATIVEEXECREDUCERS, + boolean useSpeculativeExecReducers = HiveConf.getBoolVar(job, + HiveConf.ConfVars.HIVESPECULATIVEEXECREDUCERS); + HiveConf.setBoolVar(job, HiveConf.ConfVars.HADOOPSPECULATIVEEXECREDUCERS, useSpeculativeExecReducers); String inpFormat = HiveConf.getVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT); @@ -548,8 +532,7 @@ LOG.info("Using " + inpFormat); try { - job.setInputFormat((Class) (Class - .forName(inpFormat))); + job.setInputFormat((Class) (Class.forName(inpFormat))); } catch (ClassNotFoundException e) { throw new RuntimeException(e.getMessage()); } @@ -563,8 +546,9 @@ String auxJars = HiveConf.getVar(job, HiveConf.ConfVars.HIVEAUXJARS); String addedJars = HiveConf.getVar(job, HiveConf.ConfVars.HIVEADDEDJARS); if (StringUtils.isNotBlank(auxJars) || StringUtils.isNotBlank(addedJars)) { - String allJars = StringUtils.isNotBlank(auxJars) ? (StringUtils - .isNotBlank(addedJars) ? addedJars + "," + auxJars : auxJars) + String allJars = StringUtils.isNotBlank(auxJars) ? (StringUtils.isNotBlank(addedJars) ? 
addedJars + + "," + auxJars + : auxJars) : addedJars; LOG.info("adding libjars: " + allJars); initializeFiles("tmpjars", allJars); @@ -576,8 +560,7 @@ initializeFiles("tmpfiles", addedFiles); } // Transfer HIVEADDEDARCHIVES to "tmparchives" so hadoop understands it - String addedArchives = HiveConf.getVar(job, - HiveConf.ConfVars.HIVEADDEDARCHIVES); + String addedArchives = HiveConf.getVar(job, HiveConf.ConfVars.HIVEADDEDARCHIVES); if (StringUtils.isNotBlank(addedArchives)) { initializeFiles("tmparchives", addedArchives); } @@ -585,21 +568,18 @@ int returnVal = 0; RunningJob rj = null; - boolean noName = StringUtils.isEmpty(HiveConf.getVar(job, - HiveConf.ConfVars.HADOOPJOBNAME)); + boolean noName = StringUtils.isEmpty(HiveConf.getVar(job, HiveConf.ConfVars.HADOOPJOBNAME)); if (noName) { // This is for a special case to ensure unit tests pass - HiveConf.setVar(job, HiveConf.ConfVars.HADOOPJOBNAME, "JOB" - + Utilities.randGen.nextInt()); + HiveConf.setVar(job, HiveConf.ConfVars.HADOOPJOBNAME, "JOB" + Utilities.randGen.nextInt()); } try { - //propagate the file to distributed cache - MapredLocalWork localwork =work.getMapLocalWork(); - if(localwork != null){ + // propagate the file to distributed cache + MapredLocalWork localwork = work.getMapLocalWork(); + if (localwork != null) { boolean localMode = HiveConf.getVar(job, HiveConf.ConfVars.HADOOPJT).equals("local"); - if(!localMode){ - //LOG.info("=========Adding files to HDFS ================"); + if (!localMode) { Path localPath = new Path(localwork.getTmpFileURI()); Path hdfsPath = new Path(work.getTmpHDFSFileURI()); @@ -609,28 +589,24 @@ for (int i = 0; i < hashtableFiles.length; i++) { FileStatus file = hashtableFiles[i]; Path path = file.getPath(); - String fileName=path.getName(); - String hdfsFile = hdfsPath+Path.SEPARATOR+fileName; + String fileName = path.getName(); + String hdfsFile = hdfsPath + Path.SEPARATOR + fileName; LOG.info("Upload 1 HashTable from" + path + " to: " + hdfsFile); Path hdfsFilePath = new Path(hdfsFile); - //hdfs.setVerifyChecksum(false); - hdfs.copyFromLocalFile(path,hdfsFilePath ); - short replication = (short)job.getInt("mapred.submit.replication", 10); + hdfs.copyFromLocalFile(path, hdfsFilePath); + short replication = (short) job.getInt("mapred.submit.replication", 10); hdfs.setReplication(hdfsFilePath, replication); } - //LOG.info("=========Adding files to distributed cache================"); - FileStatus[] hashtableRemoteFiles = hdfs.listStatus(hdfsPath); for (int i = 0; i < hashtableRemoteFiles.length; i++) { FileStatus file = hashtableRemoteFiles[i]; Path path = file.getPath(); DistributedCache.addCacheFile(path.toUri(), job); - LOG.info("add 1 hashtable file to distributed cache: " - + path.toUri()); + + LOG.info("add 1 hashtable file to distributed cache: " + path.toUri()); } - //LOG.info("===========Finishing adding files to distributed cache=========="); } } @@ -670,8 +646,7 @@ // add to list of running jobs to kill in case of abnormal shutdown - runningJobKillURIs.put(rj.getJobID(), rj.getTrackingURL() - + "&action=kill"); + runningJobKillURIs.put(rj.getJobID(), rj.getTrackingURL() + "&action=kill"); ExecDriverTaskHandle th = new ExecDriverTaskHandle(jc, rj); jobInfo(rj); @@ -701,15 +676,14 @@ // Has to use full name to make sure it does not conflict with // org.apache.commons.lang.StringUtils - console.printError(mesg, "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); + console.printError(mesg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e)); success = false; 
returnVal = 1; } finally { Utilities.clearMapRedWork(job); try { - if(ctxCreated) { + if (ctxCreated) { ctx.clear(); } @@ -728,8 +702,7 @@ if (rj != null) { JobCloseFeedBack feedBack = new JobCloseFeedBack(); if (work.getAliasToWork() != null) { - for (Operator op : work.getAliasToWork() - .values()) { + for (Operator op : work.getAliasToWork().values()) { op.jobClose(job, success, feedBack); } } @@ -742,10 +715,8 @@ if (success) { success = false; returnVal = 3; - String mesg = "Job Commit failed with exception '" - + Utilities.getNameMessage(e) + "'"; - console.printError(mesg, "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); + String mesg = "Job Commit failed with exception '" + Utilities.getNameMessage(e) + "'"; + console.printError(mesg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e)); } } @@ -772,8 +743,7 @@ return "Ended Job = " + jobId; } - private String getTaskAttemptLogUrl(String taskTrackerHttpAddress, - String taskAttemptId) { + private String getTaskAttemptLogUrl(String taskTrackerHttpAddress, String taskAttemptId) { return taskTrackerHttpAddress + "/tasklog?taskid=" + taskAttemptId + "&all=true"; } @@ -786,12 +756,15 @@ this.jobId = jobId; logUrls = new HashSet(); } + public void addLogUrl(String logUrl) { logUrls.add(logUrl); } + public HashSet getLogUrls() { return logUrls; } + public String getJobId() { return jobId; } @@ -811,8 +784,7 @@ // Loop to get all task completion events because getTaskCompletionEvents // only returns a subset per call while (true) { - TaskCompletionEvent[] taskCompletions = - rj.getTaskCompletionEvents(startIndex); + TaskCompletionEvent[] taskCompletions = rj.getTaskCompletionEvents(startIndex); if (taskCompletions == null || taskCompletions.length == 0) { break; @@ -836,14 +808,13 @@ String jobId = taskJobIds[1]; TaskInfo ti = taskIdToInfo.get(taskId); - if(ti == null) { + if (ti == null) { ti = new TaskInfo(jobId); taskIdToInfo.put(taskId, ti); } // These tasks should have come from the same job. - assert(ti.getJobId() == jobId); - ti.getLogUrls().add( - getTaskAttemptLogUrl(t.getTaskTrackerHttp(), t.getTaskId())); + assert (ti.getJobId() == jobId); + ti.getLogUrls().add(getTaskAttemptLogUrl(t.getTaskTrackerHttp(), t.getTaskId())); // If a task failed, then keep track of the total number of failures // for that task (typically, a task gets re-run up to 4 times if it @@ -889,11 +860,10 @@ if (failures.get(task).intValue() == maxFailures) { TaskInfo ti = taskIdToInfo.get(task); String jobId = ti.getJobId(); - String taskUrl = jtUrl + "/taskdetails.jsp?jobid=" + jobId + "&tipid=" + - task.toString(); + String taskUrl = jtUrl + "/taskdetails.jsp?jobid=" + jobId + "&tipid=" + task.toString(); TaskLogProcessor tlp = new TaskLogProcessor(conf); - for(String logUrl : ti.getLogUrls()) { + for (String logUrl : ti.getLogUrls()) { tlp.addTaskAttemptLogUrl(logUrl); } @@ -910,7 +880,7 @@ sb.append("Task ID:\n " + task + "\n\n"); sb.append("URL:\n " + taskUrl + "\n"); - for(ErrorAndSolution e : errors) { + for (ErrorAndSolution e : errors) { sb.append("\n"); sb.append("Possible error:\n " + e.getError() + "\n\n"); sb.append("Solution:\n " + e.getSolution() + "\n"); @@ -934,36 +904,29 @@ } /** - * we are running the hadoop job via a sub-command. this typically - * happens when we are running jobs in local mode. the log4j in this - * mode is controlled as follows: - * 1. if the admin provides a log4j properties file especially for - * execution mode - then we pick that up - * 2. 
otherwise - we default to the regular hive log4j properties if - * one is supplied - * 3. if none of the above two apply - we don't do anything - the log4j - * properties would likely be determined by hadoop. + * we are running the hadoop job via a sub-command. this typically happens when we are running + * jobs in local mode. the log4j in this mode is controlled as follows: 1. if the admin provides a + * log4j properties file especially for execution mode - then we pick that up 2. otherwise - we + * default to the regular hive log4j properties if one is supplied 3. if none of the above two + * apply - we don't do anything - the log4j properties would likely be determined by hadoop. * - * The intention behind providing a separate option #1 is to be able to - * collect hive run time logs generated in local mode in a separate - * (centralized) location if desired. This mimics the behavior of hive - * run time logs when running against a hadoop cluster where they are available + * The intention behind providing a separate option #1 is to be able to collect hive run time logs + * generated in local mode in a separate (centralized) location if desired. This mimics the + * behavior of hive run time logs when running against a hadoop cluster where they are available * on the tasktracker nodes. */ private static void setupChildLog4j(Configuration conf) { URL hive_l4j = ExecDriver.class.getClassLoader().getResource(SessionState.HIVE_EXEC_L4J); - if(hive_l4j == null) { - hive_l4j = ExecDriver.class.getClassLoader().getResource - (SessionState.HIVE_L4J); + if (hive_l4j == null) { + hive_l4j = ExecDriver.class.getClassLoader().getResource(SessionState.HIVE_L4J); } if (hive_l4j != null) { // setting queryid so that log4j configuration can use it to generate // per query log file - System.setProperty - (HiveConf.ConfVars.HIVEQUERYID.toString(), - HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID)); + System.setProperty(HiveConf.ConfVars.HIVEQUERYID.toString(), HiveConf.getVar(conf, + HiveConf.ConfVars.HIVEQUERYID)); LogManager.resetConfiguration(); PropertyConfigurator.configure(hive_l4j); } @@ -975,8 +938,7 @@ ArrayList jobConfArgs = new ArrayList(); boolean noLog = false; String files = null; - boolean localtask=false; - + boolean localtask = false; try { for (int i = 0; i < args.length; i++) { if (args[i].equals("-plan")) { @@ -987,7 +949,7 @@ noLog = true; } else if (args[i].equals("-files")) { files = args[++i]; - }else if(args[i].equals("-localtask")) { + } else if (args[i].equals("-localtask")) { localtask = true; } } @@ -996,11 +958,11 @@ printUsage(); } - JobConf conf ; - if(localtask) { - conf= new JobConf(MapredLocalTask.class); + JobConf conf; + if (localtask) { + conf = new JobConf(MapredLocalTask.class); } else { - conf= new JobConf(ExecDriver.class); + conf = new JobConf(ExecDriver.class); } StringBuilder sb = new StringBuilder("JobConf:\n"); @@ -1013,8 +975,8 @@ conf.set(key, value); sb.append(key).append("=").append(value).append("\n"); } catch (UnsupportedEncodingException e) { - System.err.println("Unexpected error " + e.getMessage() - + " while encoding " + one.substring(eqIndex + 1)); + System.err.println("Unexpected error " + e.getMessage() + " while encoding " + + one.substring(eqIndex + 1)); System.exit(3); } } @@ -1045,10 +1007,10 @@ // print out the location of the log file for the user so // that it's easy to find reason for local mode execution failures - for (Appender appender: Collections.list - ((Enumeration)LogManager.getRootLogger().getAllAppenders())) { + for 
(Appender appender : Collections.list((Enumeration) LogManager.getRootLogger() + .getAllAppenders())) { if (appender instanceof FileAppender) { - console.printInfo("Execution log at: " + ((FileAppender)appender).getFile()); + console.printInfo("Execution log at: " + ((FileAppender) appender).getFile()); } } @@ -1085,11 +1047,11 @@ throw new HiveException(e.getMessage(), e); } int ret; - if(localtask) { + if (localtask) { memoryMXBean = ManagementFactory.getMemoryMXBean(); MapredLocalWork plan = Utilities.deserializeMapRedLocalWork(pathData, conf); MapredLocalTask ed = new MapredLocalTask(plan, conf, isSilent); - ret= ed.executeFromChildJVM(new DriverContext()); + ret = ed.executeFromChildJVM(new DriverContext()); } else { MapredWork plan = Utilities.deserializeMapRedWork(pathData, conf); @@ -1103,8 +1065,8 @@ } /** - * Given a Hive Configuration object - generate a command line fragment for - * passing such configuration information to ExecDriver. + * Given a Hive Configuration object - generate a command line fragment for passing such + * configuration information to ExecDriver. */ public static String generateCmdLine(HiveConf hconf) { try { @@ -1117,8 +1079,7 @@ for (Object one : deltaP.keySet()) { String oneProp = (String) one; - if (hadoopLocalMode - && (oneProp.equals(hadoopSysDir) || oneProp.equals(hadoopWorkDir))) { + if (hadoopLocalMode && (oneProp.equals(hadoopSysDir) || oneProp.equals(hadoopWorkDir))) { continue; } @@ -1139,15 +1100,15 @@ sb.append("-jobconf "); sb.append(hadoopSysDir); sb.append("="); - sb.append(URLEncoder.encode(hconf.get(hadoopSysDir) + "/" - + Utilities.randGen.nextInt(), "UTF-8")); + sb.append(URLEncoder.encode(hconf.get(hadoopSysDir) + "/" + Utilities.randGen.nextInt(), + "UTF-8")); sb.append(" "); sb.append("-jobconf "); sb.append(hadoopWorkDir); sb.append("="); - sb.append(URLEncoder.encode(hconf.get(hadoopWorkDir) + "/" - + Utilities.randGen.nextInt(), "UTF-8")); + sb.append(URLEncoder.encode(hconf.get(hadoopWorkDir) + "/" + Utilities.randGen.nextInt(), + "UTF-8")); } return sb.toString(); @@ -1170,9 +1131,8 @@ /** * Handle a empty/null path for a given alias. */ - private int addInputPath(String path, JobConf job, MapredWork work, - String hiveScratchDir, int numEmptyPaths, boolean isEmptyPath, - String alias) throws Exception { + private int addInputPath(String path, JobConf job, MapredWork work, String hiveScratchDir, + int numEmptyPaths, boolean isEmptyPath, String alias) throws Exception { // either the directory does not exist or it is empty assert path == null || isEmptyPath; @@ -1184,7 +1144,7 @@ outFileFormat = partDesc.getOutputFileFormatClass(); nonNative = partDesc.getTableDesc().isNonNative(); } else { - TableDesc tableDesc = work.getAliasToPartnInfo().get(alias).getTableDesc(); + TableDesc tableDesc = work.getAliasToPartnInfo().get(alias).getTableDesc(); outFileFormat = tableDesc.getOutputFileFormatClass(); nonNative = tableDesc.isNonNative(); } @@ -1200,10 +1160,6 @@ Path newPath = new Path(newDir); FileSystem fs = newPath.getFileSystem(job); fs.mkdirs(newPath); - //Qualify the path against the filesystem. The user configured path might contain default port which is skipped - //in the file status. This makes sure that all paths which goes into PathToPartitionInfo are always listed status - //filepath. 
- newPath = fs.makeQualified(newPath); String newFile = newDir + File.separator + "emptyFile"; Path newFilePath = new Path(newFile); @@ -1237,15 +1193,14 @@ work.setPathToPartitionInfo(pathToPartitionInfo); String onefile = newPath.toString(); - RecordWriter recWriter = outFileFormat.newInstance().getHiveRecordWriter( - job, newFilePath, Text.class, false, new Properties(), null); + RecordWriter recWriter = outFileFormat.newInstance().getHiveRecordWriter(job, newFilePath, + Text.class, false, new Properties(), null); recWriter.close(false); FileInputFormat.addInputPaths(job, onefile); return numEmptyPaths; } - private void addInputPaths(JobConf job, MapredWork work, String hiveScratchDir) - throws Exception { + private void addInputPaths(JobConf job, MapredWork work, String hiveScratchDir) throws Exception { int numEmptyPaths = 0; List pathsProcessed = new ArrayList(); @@ -1282,8 +1237,8 @@ // Create a empty file if the directory is empty for (String emptyPath : emptyPaths) { - numEmptyPaths = addInputPath(emptyPath, job, work, hiveScratchDir, - numEmptyPaths, true, oneAlias); + numEmptyPaths = addInputPath(emptyPath, job, work, hiveScratchDir, numEmptyPaths, true, + oneAlias); } // If the query references non-existent partitions @@ -1295,8 +1250,8 @@ // If T is empty and T2 contains 100 rows, the user expects: 0, 100 (2 // rows) if (path == null) { - numEmptyPaths = addInputPath(null, job, work, hiveScratchDir, - numEmptyPaths, false, oneAlias); + numEmptyPaths = addInputPath(null, job, work, hiveScratchDir, numEmptyPaths, false, + oneAlias); } } } @@ -1315,15 +1270,15 @@ protected void localizeMRTmpFilesImpl(Context ctx) { // localize any map-reduce input paths - ctx.localizeKeys((Map)((Object)work.getPathToAliases())); - ctx.localizeKeys((Map)((Object)work.getPathToPartitionInfo())); + ctx.localizeKeys((Map) ((Object) work.getPathToAliases())); + ctx.localizeKeys((Map) ((Object) work.getPathToPartitionInfo())); // localize any input paths for maplocal work MapredLocalWork l = work.getMapLocalWork(); if (l != null) { Map m = l.getAliasToFetchWork(); if (m != null) { - for (FetchWork fw: m.values()) { + for (FetchWork fw : m.values()) { String s = fw.getTblDir(); if ((s != null) && ctx.isMRTmpFileURI(s)) { fw.setTblDir(ctx.localizeMRTmpFileURI(s)); @@ -1335,22 +1290,21 @@ // fix up outputs Map> pa = work.getPathToAliases(); if (pa != null) { - for (List ls: pa.values()) { - for (String a: ls) { - ArrayList> opList = new - ArrayList> (); + for (List ls : pa.values()) { + for (String a : ls) { + ArrayList> opList = new ArrayList>(); opList.add(work.getAliasToWork().get(a)); while (!opList.isEmpty()) { Operator op = opList.remove(0); if (op instanceof FileSinkOperator) { - FileSinkDesc fdesc = ((FileSinkOperator)op).getConf(); + FileSinkDesc fdesc = ((FileSinkOperator) op).getConf(); String s = fdesc.getDirName(); if ((s != null) && ctx.isMRTmpFileURI(s)) { fdesc.setDirName(ctx.localizeMRTmpFileURI(s)); } - ((FileSinkOperator)op).setConf(fdesc); + ((FileSinkOperator) op).setConf(fdesc); } if (op.getChildOperators() != null) { Index: ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java (working copy) @@ -437,9 +437,10 @@ boolean newObjNull) { // newObj is null if is already null or // if the row corresponding to the left alias does not pass through 
filter
-    newObjNull = newObjNull ||
-        ((BooleanWritable) (intObj.getObjs()[left].get(
-        joinValues.get(order[left]).size()))).get();
+    int filterIndex = joinValues.get(order[left]).size();
+    if (filterIndex < intObj.getObjs()[left].size()) {
+      newObjNull = newObjNull || ((BooleanWritable) (intObj.getObjs()[left].get(filterIndex))).get();
+    }

     Iterator nullsIter = inputNulls.iterator();
     while (nullsIter.hasNext()) {
Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolver.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolver.java (revision 1034599)
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolver.java (working copy)
@@ -31,7 +31,7 @@
 public interface ConditionalResolver {
   /**
    * All conditional resolvers implement this interface.
-   * 
+   *
    * @param conf
    *          configuration
    * @param ctx
@@ -39,4 +39,6 @@
    * @return position of the task
    */
   List<Task<? extends Serializable>> getTasks(HiveConf conf, Object ctx);
+
+  Task<? extends Serializable> getBackUpTask(HiveConf conf, Object ctx);
 }
Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java (revision 1034599)
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java (working copy)
@@ -122,7 +122,7 @@
       // If the input file does not exist, replace it by a empty file
       Path dirPath = new Path(dirName);
       FileSystem inpFs = dirPath.getFileSystem(conf);
-      
+
       if (inpFs.exists(dirPath)) {
         DynamicPartitionCtx dpCtx = ctx.getDPCtx();
         boolean doMerge = false;
@@ -202,4 +202,8 @@
     resTsks.add(ctx.getListTasks().get(0));
     return resTsks;
   }
+
+  public Task<? extends Serializable> getBackUpTask(HiveConf conf, Object ctx) {
+    return null;
+  }
 }
Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverSkewJoin.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverSkewJoin.java (revision 1034599)
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverSkewJoin.java (working copy)
@@ -112,5 +112,8 @@
     }
     return resTsks;
   }
+  public Task<? extends Serializable> getBackUpTask(HiveConf conf, Object ctx) {
+    return null;
+  }
 }
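The getBackUpTask addition above changes the ConditionalResolver contract: getTasks picks the plan to run, while getBackUpTask names the plan to fall back on when the picked one fails at runtime (null when there is no safe fallback, as in the merge-file and skew-join resolvers). The following self-contained sketch illustrates that contract with toy types; Resolver, Job and SizeBasedResolver are illustrative stand-ins, not Hive classes.

import java.util.ArrayList;
import java.util.List;

// Toy stand-ins for Task / ConditionalResolver.
interface Job {
  boolean run();
}

interface Resolver {
  List<Job> pick(long inputBytes); // mirrors getTasks(): choose the plan to run
  Job backup();                    // mirrors getBackUpTask(): null = no fallback
}

class SizeBasedResolver implements Resolver {
  private final Job mapJoin;
  private final Job commonJoin;
  private final long threshold;

  SizeBasedResolver(Job mapJoin, Job commonJoin, long threshold) {
    this.mapJoin = mapJoin;
    this.commonJoin = commonJoin;
    this.threshold = threshold;
  }

  public List<Job> pick(long inputBytes) {
    List<Job> out = new ArrayList<Job>();
    // small inputs take the optimistic map-join plan, large ones the safe plan
    out.add(inputBytes <= threshold ? mapJoin : commonJoin);
    return out;
  }

  public Job backup() {
    // the safe plan the driver can queue if the optimistic one fails
    return commonJoin;
  }
}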
Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java (revision 0)
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java (revision 0)
@@ -0,0 +1,180 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.plan;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * ConditionalResolverCommonJoin.
+ *
+ */
+public class ConditionalResolverCommonJoin implements ConditionalResolver, Serializable {
+  private static final long serialVersionUID = 1L;
+
+  /**
+   * ConditionalResolverCommonJoinCtx.
+   *
+   */
+  public static class ConditionalResolverCommonJoinCtx implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private HashMap<String, Task<? extends Serializable>> aliasToTask;
+    private HashMap<String, String> aliasToPath;
+    private Task<? extends Serializable> commonJoinTask;
+
+    public ConditionalResolverCommonJoinCtx() {
+    }
+
+    public HashMap<String, Task<? extends Serializable>> getAliasToTask() {
+      return aliasToTask;
+    }
+
+    public void setAliasToTask(HashMap<String, Task<? extends Serializable>> aliasToTask) {
+      this.aliasToTask = aliasToTask;
+    }
+
+    public HashMap<String, String> getAliasToPath() {
+      return aliasToPath;
+    }
+
+    public void setAliasToPath(HashMap<String, String> aliasToPath) {
+      this.aliasToPath = aliasToPath;
+    }
+
+    public Task<? extends Serializable> getCommonJoinTask() {
+      return commonJoinTask;
+    }
+
+    public void setCommonJoinTask(Task<? extends Serializable> commonJoinTask) {
+      this.commonJoinTask = commonJoinTask;
+    }
+
+  }
+
+  public ConditionalResolverCommonJoin() {
+  }
+
+  @Override
+  public List<Task<? extends Serializable>> getTasks(HiveConf conf, Object objCtx) {
+    ConditionalResolverCommonJoinCtx ctx = (ConditionalResolverCommonJoinCtx) objCtx;
+    List<Task<? extends Serializable>> resTsks = new ArrayList<Task<? extends Serializable>>();
+
+    // get aliasToPath and pass it to the heuristic
+    HashMap<String, String> aliasToPath = ctx.getAliasToPath();
+    String bigTableAlias = this.resolveMapJoinTask(aliasToPath, conf);
+
+    if (bigTableAlias == null) {
+      // run the common join task
+      resTsks.add(ctx.getCommonJoinTask());
+    } else {
+      // run the map join task
+      resTsks.add(ctx.getAliasToTask().get(bigTableAlias));
+    }
+
+    return resTsks;
+  }
+
+  public Task<? extends Serializable> getBackUpTask(HiveConf conf, Object objCtx) {
+    ConditionalResolverCommonJoinCtx ctx = (ConditionalResolverCommonJoinCtx) objCtx;
+    Task<? extends Serializable> resTsk = ctx.getCommonJoinTask();
+    return resTsk;
+  }
+
+  private String resolveMapJoinTask(HashMap<String, String> aliasToPath, HiveConf conf) {
+    // for a full outer join, return null directly
+    if (aliasToPath.size() == 0) {
+      return null;
+    }
+
+    // generate a file size to alias mapping; a file size cannot be used as a map key,
+    // so two parallel lists keep the mapping
+    List<String> aliasList = new ArrayList<String>();
+    List<Long> fileSizeList = new ArrayList<Long>();
+
+    try {
+      for (Map.Entry<String, String> entry : aliasToPath.entrySet()) {
+        String alias = entry.getKey();
+        String pathStr = entry.getValue();
+
+        Path path = new Path(pathStr);
+        FileSystem fs = path.getFileSystem(conf);
+        FileStatus[] fstatus = fs.listStatus(path);
+        long fileSize = 0;
+        for (int i = 0; i < fstatus.length; i++) {
+          fileSize += fstatus[i].getLen();
+        }
+        if (fileSize == 0) {
+          throw new HiveException("Input file size is 0");
+        }
+
+        // put into the parallel lists
+        aliasList.add(alias);
+        fileSizeList.add(fileSize);
+
+      }
+      // sort based on file size
+      List<Long> sortedList = new ArrayList<Long>(fileSizeList);
+      Collections.sort(sortedList);
+
+      // get the big table file size and the summed small table file size
+      long bigTableFileSize = 0;
+      long smallTablesFileSizeSum = 0;
+      String bigTableFileAlias = null;
+      int size = sortedList.size();
+
+      // iterate the sorted list to get the big/small table file sizes
+      for (int index = 0; index < sortedList.size(); index++) {
+        Long key = sortedList.get(index);
+        int i = fileSizeList.indexOf(key);
+        String alias = aliasList.get(i);
+
+        if (index != (size - 1)) {
+          smallTablesFileSizeSum += key.longValue();
+        } else {
+          bigTableFileSize += key.longValue();
+          bigTableFileAlias = alias;
+        }
+      }
+
+      // compare with the threshold
+      long threshold = HiveConf.getLongVar(conf, HiveConf.ConfVars.HIVESMALLTABLESFILESIZE);
+      if (smallTablesFileSizeSum <= threshold) {
+        return bigTableFileAlias;
+      } else {
+        return null;
+      }
+    } catch (Exception e) {
+      e.printStackTrace();
+      return null;
+    }
+  }
+}
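The resolver above reduces to a simple size heuristic: sum the sizes of all join inputs, treat the largest as the big (streamed) table, and convert to a map join only when the remaining small tables fit under the hive.smalltable.filesize budget. A condensed, self-contained sketch of that decision follows; BigTablePicker and its names are illustrative, not the Hive API.

import java.util.HashMap;
import java.util.Map;

final class BigTablePicker {
  /**
   * Returns the alias to stream as the big table, or null when the combined
   * size of the remaining tables exceeds the threshold and the plan should
   * stay a common (reduce-side) join.
   */
  static String pick(Map<String, Long> aliasToSize, long smallTableThreshold) {
    if (aliasToSize.isEmpty()) {
      return null;
    }
    String bigAlias = null;
    long bigSize = Long.MIN_VALUE;
    long total = 0;
    for (Map.Entry<String, Long> e : aliasToSize.entrySet()) {
      total += e.getValue();
      if (e.getValue() > bigSize) {
        bigSize = e.getValue();
        bigAlias = e.getKey();
      }
    }
    // everything except the biggest input must fit the small-table budget
    return (total - bigSize) <= smallTableThreshold ? bigAlias : null;
  }

  public static void main(String[] args) {
    Map<String, Long> sizes = new HashMap<String, Long>();
    sizes.put("src1", 40000000L);
    sizes.put("src2", 10000000L);
    sizes.put("src3", 5000000L);
    // 25MB default budget (hive.smalltable.filesize): 15MB of small tables fits
    System.out.println(pick(sizes, 25000000L)); // prints src1
  }
}

Tracking the maximum directly, as in this sketch, also keeps the choice well defined when two inputs happen to have identical sizes, which the index-based indexOf lookup in the patch does not guarantee.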
Index: ql/src/java/org/apache/hadoop/hive/ql/plan/HashTableSinkDesc.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/plan/HashTableSinkDesc.java (revision 1034599)
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/HashTableSinkDesc.java (working copy)
@@ -68,7 +68,9 @@
   private Map<Byte, List<ExprNodeDesc>> keys;
   private TableDesc keyTblDesc;
   private List<TableDesc> valueTblDescs;
+  private List<TableDesc> valueTblFilteredDescs;
+
   private int posBigTable;

   private Map<Byte, List<Integer>> retainList;
@@ -100,6 +102,7 @@
     this.keys = clone.getKeys();
     this.keyTblDesc = clone.getKeyTblDesc();
     this.valueTblDescs = clone.getValueTblDescs();
+    this.valueTblFilteredDescs = clone.getValueFilteredTblDescs();
     this.posBigTable = clone.getPosBigTable();
     this.retainList = clone.getRetainList();
     this.bigTableAlias = clone.getBigTableAlias();
@@ -186,6 +189,15 @@
     return filters;
   }

+  public List<TableDesc> getValueTblFilteredDescs() {
+    return valueTblFilteredDescs;
+  }
+
+  public void setValueTblFilteredDescs(List<TableDesc> valueTblFilteredDescs) {
+    this.valueTblFilteredDescs = valueTblFilteredDescs;
+  }
+
   @Override
   public void setFilters(Map<Byte, List<ExprNodeDesc>> filters) {
     this.filters = filters;
Index: ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java (revision 1034599)
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java (working copy)
@@ -27,6 +27,8 @@

 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.parse.OpParseContext;
+import org.apache.hadoop.hive.ql.parse.QBJoinTree;

 /**
  * MapredWork.
@@ -69,6 +71,11 @@

   private String tmpHDFSFileURI;

+  private LinkedHashMap<Operator<? extends Serializable>, OpParseContext> opParseCtxMap;
+
+  private QBJoinTree joinTree;
+
+
   public MapredWork() {
     aliasToPartnInfo = new LinkedHashMap();
   }
@@ -340,4 +347,23 @@
   public void setTmpHDFSFileURI(String tmpHDFSFileURI) {
     this.tmpHDFSFileURI = tmpHDFSFileURI;
   }
+
+
+  public QBJoinTree getJoinTree() {
+    return joinTree;
+  }
+
+  public void setJoinTree(QBJoinTree joinTree) {
+    this.joinTree = joinTree;
+  }
+
+  public LinkedHashMap<Operator<? extends Serializable>, OpParseContext> getOpParseCtxMap() {
+    return opParseCtxMap;
+  }
+
+  public void setOpParseCtxMap(
+      LinkedHashMap<Operator<? extends Serializable>, OpParseContext> opParseCtxMap) {
+    this.opParseCtxMap = opParseCtxMap;
+  }
+
 }
Index: ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java (revision 1034599)
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java (working copy)
@@ -39,6 +39,7 @@
   private Map<Byte, List<ExprNodeDesc>> keys;
   private TableDesc keyTblDesc;
   private List<TableDesc> valueTblDescs;
+  private List<TableDesc> valueFilteredTblDescs;

   private int posBigTable;

@@ -67,13 +68,14 @@
   public MapJoinDesc(final Map<Byte, List<ExprNodeDesc>> keys,
       final TableDesc keyTblDesc, final Map<Byte, List<ExprNodeDesc>> values,
-      final List valueTblDescs, List outputColumnNames,
+      final List<TableDesc> valueTblDescs, final List<TableDesc> valueFilteredTblDescs,
+      List<String> outputColumnNames,
       final int posBigTable, final JoinCondDesc[] conds,
       final Map<Byte, List<ExprNodeDesc>> filters, boolean noOuterJoin) {
     super(values, outputColumnNames, noOuterJoin, conds, filters);
     this.keys = keys;
     this.keyTblDesc = keyTblDesc;
     this.valueTblDescs = valueTblDescs;
+    this.valueFilteredTblDescs = valueFilteredTblDescs;
     this.posBigTable = posBigTable;
     this.bucketFileNameMapping = new LinkedHashMap();
     initRetainExprList();
@@ -148,6 +150,14 @@
     this.keyTblDesc = keyTblDesc;
   }

+  public List<TableDesc> getValueFilteredTblDescs() {
+    return valueFilteredTblDescs;
+  }
+
+  public void setValueFilteredTblDescs(List<TableDesc> valueFilteredTblDescs) {
+    this.valueFilteredTblDescs = valueFilteredTblDescs;
+  }
+
   /**
    * @return the valueTblDescs
    */
Index: ql/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java (revision 1034599)
+++ ql/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java (working copy)
@@ -30,7 +30,7 @@

   /**
    * Dispatcher function.
-   * 
+   *
    * @param nd
    *          operator to process.
    * @param stack
@@ -43,4 +43,5 @@
    */
   Object dispatch(Node nd, Stack stack, Object... nodeOutputs)
       throws SemanticException;
+
 }
Index: ql/src/java/org/apache/hadoop/hive/ql/lib/TaskGraphWalker.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/lib/TaskGraphWalker.java (revision 0)
+++ ql/src/java/org/apache/hadoop/hive/ql/lib/TaskGraphWalker.java (revision 0)
@@ -0,0 +1,200 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.lib;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.Stack;
+
+import org.apache.hadoop.hive.ql.exec.ConditionalTask;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Base class for the task graph walker. This class takes a list of starting tasks
+ * and walks them one by one. It maintains a list of dispatched tasks
+ * (dispatchedList) and a list of tasks that are discovered but not yet dispatched.
+ */
+public class TaskGraphWalker implements GraphWalker {
+
+  public class TaskGraphWalkerContext {
+    private final HashMap<Node, Object> retMap;
+
+    public TaskGraphWalkerContext(HashMap<Node, Object> retMap) {
+      this.retMap = retMap;
+    }
+
+    public void addToDispatchList(Node dispatchedObj) {
+      if (dispatchedObj != null) {
+        retMap.put(dispatchedObj, null);
+      }
+    }
+  }
+
+  protected Stack<Node> opStack;
+  private final List<Node> toWalk = new ArrayList<Node>();
+  private final HashMap<Node, Object> retMap = new HashMap<Node, Object>();
+  private final Dispatcher dispatcher;
+  private final TaskGraphWalkerContext walkerCtx;
+
+  /**
+   * Constructor.
+   *
+   * @param disp
+   *          dispatcher to call for each op encountered
+   */
+  public TaskGraphWalker(Dispatcher disp) {
+    dispatcher = disp;
+    opStack = new Stack<Node>();
+    walkerCtx = new TaskGraphWalkerContext(retMap);
+  }
+
+  /**
+   * @return the toWalk
+   */
+  public List<Node> getToWalk() {
+    return toWalk;
+  }
+
+  /**
+   * @return the doneList
+   */
+  public Set<Node> getDispatchedList() {
+    return retMap.keySet();
+  }
+
+  /**
+   * Dispatch the current operator.
+   *
+   * @param nd
+   *          node being walked
+   * @param ndStack
+   *          stack of nodes encountered
+   * @throws SemanticException
+   */
+  public void dispatch(Node nd, Stack<Node> ndStack, TaskGraphWalkerContext walkerCtx)
+      throws SemanticException {
+    Object[] nodeOutputs = null;
+    if (nd.getChildren() != null) {
+      nodeOutputs = new Object[nd.getChildren().size() + 1];
+      nodeOutputs[0] = walkerCtx;
+      int i = 1;
+      for (Node child : nd.getChildren()) {
+        nodeOutputs[i++] = retMap.get(child);
+      }
+    } else {
+      nodeOutputs = new Object[1];
+      nodeOutputs[0] = walkerCtx;
+    }
+
+    Object retVal = dispatcher.dispatch(nd, ndStack, nodeOutputs);
+    retMap.put(nd, retVal);
+  }
+
+  public void dispatch(Node nd, Stack<Node> ndStack) throws SemanticException {
+    Object[] nodeOutputs = null;
+    if (nd.getChildren() != null) {
+      nodeOutputs = new Object[nd.getChildren().size()];
+      int i = 0;
+      for (Node child : nd.getChildren()) {
+        nodeOutputs[i++] = retMap.get(child);
+      }
+    }
+
+    Object retVal = dispatcher.dispatch(nd, ndStack, nodeOutputs);
+    retMap.put(nd, retVal);
+  }
+
+  /**
+   * starting point for walking.
+   *
+   * @throws SemanticException
+   */
+  public void startWalking(Collection<Node> startNodes,
+      HashMap<Node, Object> nodeOutput) throws SemanticException {
+    toWalk.addAll(startNodes);
+    while (toWalk.size() > 0) {
+      Node nd = toWalk.remove(0);
+      walk(nd);
+      if (nodeOutput != null) {
+        nodeOutput.put(nd, retMap.get(nd));
+      }
+    }
+  }
+
+  /**
+   * walk the current operator and its descendants.
+   *
+   * @param nd
+   *          current operator in the graph
+   * @throws SemanticException
+   */
+  public void walk(Node nd) throws SemanticException {
+    if (!(nd instanceof Task)) {
+      throw new SemanticException("TaskGraphWalker only walks a Task graph");
+    }
+
+    if (getDispatchedList().contains(nd)) {
+      return;
+    }
+    if (opStack.empty() || nd != opStack.peek()) {
+      opStack.push(nd);
+    }
+
+    List<Task<? extends Serializable>> nextTaskList = null;
+    Set<Task<? extends Serializable>> nextTaskSet = new HashSet<Task<? extends Serializable>>();
+    List<Task<? extends Serializable>> taskListInConditionalTask = null;
+
+    if (nd instanceof ConditionalTask) {
+      taskListInConditionalTask = ((ConditionalTask) nd).getListTasks();
+      for (Task<? extends Serializable> tsk : taskListInConditionalTask) {
+        List<Task<? extends Serializable>> childTask = tsk.getChildTasks();
+        if (childTask != null) {
+          nextTaskSet.addAll(tsk.getChildTasks());
+        }
+      }
+      if (nextTaskSet.size() > 0) {
+        nextTaskList = new ArrayList<Task<? extends Serializable>>();
+        for (Task<? extends Serializable> tsk : nextTaskSet) {
+          nextTaskList.add(tsk);
+        }
+      }
+    } else {
+      nextTaskList = ((Task<? extends Serializable>) nd).getChildTasks();
+    }
+
+    if ((nextTaskList == null)
+        || getDispatchedList().containsAll(nextTaskList)) {
+      dispatch(nd, opStack, this.walkerCtx);
+      opStack.pop();
+      return;
+    }
+    // add children, self to the front of the queue in that order
+    getToWalk().add(0, nd);
+    getToWalk().removeAll(nextTaskList);
+    getToWalk().addAll(0, nextTaskList);
+  }
+}
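One subtlety in walk() above: for a ConditionalTask, the successors to wait on are the children of its branch tasks rather than the branches themselves, and a node is dispatched only once all of its successors have been. A stripped-down model of that ordering rule follows; TaskNode and ToyTaskWalker are illustrative toy types, not the Hive classes.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Toy task graph: a node is dispatched only after all of its successors.
class TaskNode {
  final String name;
  final List<TaskNode> children = new ArrayList<TaskNode>();
  final List<TaskNode> branches = new ArrayList<TaskNode>(); // non-empty => conditional

  TaskNode(String name) {
    this.name = name;
  }

  // For a conditional task, the successors are the branch tasks' children.
  List<TaskNode> successors() {
    if (branches.isEmpty()) {
      return children;
    }
    List<TaskNode> out = new ArrayList<TaskNode>();
    for (TaskNode branch : branches) {
      out.addAll(branch.children);
    }
    return out;
  }
}

class ToyTaskWalker {
  static List<String> walk(TaskNode start) {
    List<String> dispatched = new ArrayList<String>();
    Set<TaskNode> done = new HashSet<TaskNode>();
    Deque<TaskNode> toWalk = new ArrayDeque<TaskNode>();
    toWalk.add(start);
    while (!toWalk.isEmpty()) {
      TaskNode nd = toWalk.pollFirst();
      if (done.contains(nd)) {
        continue;
      }
      List<TaskNode> next = nd.successors();
      if (done.containsAll(next)) {
        done.add(nd);
        dispatched.add(nd.name);
      } else {
        // requeue this node behind its pending successors, mirroring the
        // add-children-then-self requeueing in TaskGraphWalker.walk()
        toWalk.addFirst(nd);
        for (int i = next.size() - 1; i >= 0; i--) {
          if (!done.contains(next.get(i))) {
            toWalk.addFirst(next.get(i));
          }
        }
      }
    }
    return dispatched;
  }
}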
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/QBJoinTree.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/QBJoinTree.java (revision 1034599)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/QBJoinTree.java (working copy)
@@ -18,6 +18,7 @@

 package org.apache.hadoop.hive.ql.parse;

+import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -25,9 +26,10 @@

 /**
  * Internal representation of the join tree.
- * 
+ *
  */
-public class QBJoinTree {
+public class QBJoinTree implements Serializable {
+  private static final long serialVersionUID = 1L;
   private String leftAlias;
   private String[] rightAliases;
   private String[] leftAliases;
@@ -70,7 +72,7 @@

   /**
    * returns left alias if any - this is used for merging later on.
-   * 
+   *
    * @return left alias if any
    */
   public String getLeftAlias() {
@@ -79,7 +81,7 @@

   /**
    * set left alias for the join expression.
-   * 
+   *
    * @param leftAlias
    *          String
    */
@@ -229,7 +231,7 @@

   /**
    * Insert only a key to the semijoin table name to column names map.
-   * 
+   *
    * @param alias
    *          table name alias.
    */
@@ -241,7 +243,7 @@

   /**
    * Remeber the mapping of table alias to set of columns.
-   * 
+   *
    * @param alias
    * @param columns
    */
@@ -256,7 +258,7 @@

   /**
    * Remeber the mapping of table alias to set of columns.
-   * 
+   *
    * @param alias
    * @param columns
    */
@@ -277,7 +279,7 @@

   /**
    * Merge the rhs tables from another join tree.
- * + * * @param src * the source join tree */ Index: ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java (working copy) @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.parse; +import java.io.Serializable; import java.util.ArrayList; import org.antlr.runtime.Token; @@ -26,9 +27,10 @@ /** * @author athusoo - * + * */ -public class ASTNode extends CommonTree implements Node { +public class ASTNode extends CommonTree implements Node,Serializable { + private static final long serialVersionUID = 1L; private ASTNodeOrigin origin; @@ -37,7 +39,7 @@ /** * Constructor. - * + * * @param t * Token for the CommonTree Node */ @@ -47,7 +49,7 @@ /* * (non-Javadoc) - * + * * @see org.apache.hadoop.hive.ql.lib.Node#getChildren() */ public ArrayList getChildren() { @@ -65,7 +67,7 @@ /* * (non-Javadoc) - * + * * @see org.apache.hadoop.hive.ql.lib.Node#getName() */ public String getName() { Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (working copy) @@ -1297,7 +1297,7 @@ throws SemanticException { OpParseContext inputCtx = opParseCtx.get(input); - RowResolver inputRR = inputCtx.getRR(); + RowResolver inputRR = inputCtx.getRowResolver(); Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild( new FilterDesc(genExprNodeDesc(condn, inputRR), false), new RowSchema( inputRR.getColumnInfos()), input), inputRR); @@ -1599,7 +1599,7 @@ StringBuilder inpColumns = new StringBuilder(); StringBuilder inpColumnTypes = new StringBuilder(); - ArrayList inputSchema = opParseCtx.get(input).getRR() + ArrayList inputSchema = opParseCtx.get(input).getRowResolver() .getColumnInfos(); for (int i = 0; i < inputSchema.size(); ++i) { if (i != 0) { @@ -1838,7 +1838,7 @@ ASTNode trfm = null; String alias = qb.getParseInfo().getAlias(); Integer pos = Integer.valueOf(0); - RowResolver inputRR = opParseCtx.get(input).getRR(); + RowResolver inputRR = opParseCtx.get(input).getRowResolver(); // SELECT * or SELECT TRANSFORM(*) boolean selectStar = false; int posn = 0; @@ -2178,7 +2178,7 @@ Map genericUDAFEvaluators) throws SemanticException { RowResolver groupByInputRowResolver = opParseCtx - .get(reduceSinkOperatorInfo).getRR(); + .get(reduceSinkOperatorInfo).getRowResolver(); RowResolver groupByOutputRowResolver = new RowResolver(); groupByOutputRowResolver.setIsExprResolver(true); ArrayList groupByKeys = new ArrayList(); @@ -2303,7 +2303,7 @@ boolean distPartAgg) throws SemanticException { ArrayList outputColumnNames = new ArrayList(); RowResolver groupByInputRowResolver = opParseCtx - .get(reduceSinkOperatorInfo).getRR(); + .get(reduceSinkOperatorInfo).getRowResolver(); RowResolver groupByOutputRowResolver = new RowResolver(); groupByOutputRowResolver.setIsExprResolver(true); ArrayList groupByKeys = new ArrayList(); @@ -2452,7 +2452,7 @@ throws SemanticException { RowResolver groupByInputRowResolver = opParseCtx.get(inputOperatorInfo) - .getRR(); + .getRowResolver(); QBParseInfo parseInfo = qb.getParseInfo(); RowResolver groupByOutputRowResolver = new RowResolver(); groupByOutputRowResolver.setIsExprResolver(true); @@ -2569,7 +2569,7 @@ boolean mapAggrDone) throws 
SemanticException { RowResolver reduceSinkInputRowResolver = opParseCtx.get(inputOperatorInfo) - .getRR(); + .getRowResolver(); QBParseInfo parseInfo = qb.getParseInfo(); RowResolver reduceSinkOutputRowResolver = new RowResolver(); reduceSinkOutputRowResolver.setIsExprResolver(true); @@ -2713,7 +2713,7 @@ String dest, Operator groupByOperatorInfo, int numPartitionFields, int numReducers) throws SemanticException { RowResolver reduceSinkInputRowResolver2 = opParseCtx.get( - groupByOperatorInfo).getRR(); + groupByOperatorInfo).getRowResolver(); RowResolver reduceSinkOutputRowResolver2 = new RowResolver(); reduceSinkOutputRowResolver2.setIsExprResolver(true); Map colExprMap = new HashMap(); @@ -2784,7 +2784,7 @@ Map genericUDAFEvaluators) throws SemanticException { RowResolver groupByInputRowResolver2 = opParseCtx.get( - reduceSinkOperatorInfo2).getRR(); + reduceSinkOperatorInfo2).getRowResolver(); RowResolver groupByOutputRowResolver2 = new RowResolver(); groupByOutputRowResolver2.setIsExprResolver(true); ArrayList groupByKeys = new ArrayList(); @@ -3098,7 +3098,7 @@ genericUDAFEvaluators); groupOpToInputTables.put(groupByOperatorInfo, opParseCtx.get( - inputOperatorInfo).getRR().getTableNames()); + inputOperatorInfo).getRowResolver().getTableNames()); int numReducers = -1; // Optimize the scenario when there are no grouping keys - only 1 reducer is @@ -3169,7 +3169,7 @@ genericUDAFEvaluators); groupOpToInputTables.put(groupByOperatorInfo, opParseCtx.get( - inputOperatorInfo).getRR().getTableNames()); + inputOperatorInfo).getRowResolver().getTableNames()); // Optimize the scenario when there are no grouping keys and no distinct - 2 // map-reduce jobs are not needed // For eg: select count(1) from T where t.ds = .... @@ -3405,7 +3405,7 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input) throws SemanticException { - RowResolver inputRR = opParseCtx.get(input).getRR(); + RowResolver inputRR = opParseCtx.get(input).getRowResolver(); QBMetaData qbm = qb.getMetaData(); Integer dest_type = qbm.getDestTypeForAlias(dest); @@ -3705,7 +3705,7 @@ } input = genConversionSelectOperator(dest, qb, input, table_desc, dpCtx); - inputRR = opParseCtx.get(input).getRR(); + inputRR = opParseCtx.get(input).getRowResolver(); ArrayList vecCol = new ArrayList(); @@ -3789,7 +3789,7 @@ // Check column number List tableFields = oi.getAllStructFieldRefs(); boolean dynPart = HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING); - ArrayList rowFields = opParseCtx.get(input).getRR() + ArrayList rowFields = opParseCtx.get(input).getRowResolver() .getColumnInfos(); int inColumnCnt = rowFields.size(); int outColumnCnt = tableFields.size(); @@ -3899,7 +3899,7 @@ // write into a local file and then have a map-only job. 
// Add the limit operator to get the value fields - RowResolver inputRR = opParseCtx.get(input).getRR(); + RowResolver inputRR = opParseCtx.get(input).getRowResolver(); Operator limitMap = putOpInsertMap(OperatorFactory.getAndMakeChild( new LimitDesc(limit), new RowSchema(inputRR.getColumnInfos()), input), inputRR); @@ -3944,7 +3944,7 @@ // resulting output object inspector can be used to make the RowResolver // for the UDTF operator - RowResolver selectRR = opParseCtx.get(input).getRR(); + RowResolver selectRR = opParseCtx.get(input).getRowResolver(); ArrayList inputCols = selectRR.getColumnInfos(); // Create the object inspector for the input columns and initialize the UDTF @@ -4023,7 +4023,7 @@ private ArrayList getParitionColsFromBucketCols(String dest, QB qb, Table tab, TableDesc table_desc, Operator input, boolean convert) throws SemanticException { - RowResolver inputRR = opParseCtx.get(input).getRR(); + RowResolver inputRR = opParseCtx.get(input).getRowResolver(); List tabBucketCols = tab.getBucketCols(); List tabCols = tab.getCols(); @@ -4057,7 +4057,7 @@ } List tableFields = oi.getAllStructFieldRefs(); - ArrayList rowFields = opParseCtx.get(input).getRR() + ArrayList rowFields = opParseCtx.get(input).getRowResolver() .getColumnInfos(); // Check column type @@ -4095,7 +4095,7 @@ private ArrayList getSortCols(String dest, QB qb, Table tab, TableDesc table_desc, Operator input, boolean convert) throws SemanticException { - RowResolver inputRR = opParseCtx.get(input).getRR(); + RowResolver inputRR = opParseCtx.get(input).getRowResolver(); List tabSortCols = tab.getSortCols(); List tabCols = tab.getCols(); @@ -4122,7 +4122,7 @@ ArrayList partitionCols, int numReducers) throws SemanticException { - RowResolver inputRR = opParseCtx.get(input).getRR(); + RowResolver inputRR = opParseCtx.get(input).getRowResolver(); // For the generation of the values expression just get the inputs // signature and generate field expressions for those @@ -4181,7 +4181,7 @@ private Operator genReduceSinkPlan(String dest, QB qb, Operator input, int numReducers) throws SemanticException { - RowResolver inputRR = opParseCtx.get(input).getRR(); + RowResolver inputRR = opParseCtx.get(input).getRowResolver(); // First generate the expression for the partition and sort keys // The cluster by clause / distribute by clause has the aliases for @@ -4320,7 +4320,7 @@ // check whether this input operator produces output if (omitOpts == null || !omitOpts.contains(pos)) { // prepare output descriptors for the input opt - RowResolver inputRS = opParseCtx.get(input).getRR(); + RowResolver inputRS = opParseCtx.get(input).getRowResolver(); Iterator keysIter = inputRS.getTableNames().iterator(); Set aliases = posToAliasMap.get(pos); if (aliases == null) { @@ -4379,7 +4379,7 @@ @SuppressWarnings("nls") private Operator genJoinReduceSinkChild(QB qb, QBJoinTree joinTree, Operator child, String srcName, int pos) throws SemanticException { - RowResolver inputRS = opParseCtx.get(child).getRR(); + RowResolver inputRS = opParseCtx.get(child).getRowResolver(); RowResolver outputRS = new RowResolver(); ArrayList outputColumns = new ArrayList(); ArrayList reduceKeys = new ArrayList(); @@ -4515,7 +4515,7 @@ private Operator insertSelectForSemijoin(ArrayList fields, Operator input) throws SemanticException { - RowResolver inputRR = opParseCtx.get(input).getRR(); + RowResolver inputRR = opParseCtx.get(input).getRowResolver(); ArrayList colList = new ArrayList(); ArrayList columnNames = new ArrayList(); @@ -4547,7 +4547,7 @@ throws 
SemanticException { RowResolver groupByInputRowResolver = opParseCtx.get(inputOperatorInfo) - .getRR(); + .getRowResolver(); RowResolver groupByOutputRowResolver = new RowResolver(); ArrayList groupByKeys = new ArrayList(); ArrayList outputColumnNames = new ArrayList(); @@ -5121,7 +5121,7 @@ private Operator insertSelectAllPlanForGroupBy(String dest, Operator input) throws SemanticException { OpParseContext inputCtx = opParseCtx.get(input); - RowResolver inputRR = inputCtx.getRR(); + RowResolver inputRR = inputCtx.getRowResolver(); ArrayList columns = inputRR.getColumnInfos(); ArrayList colList = new ArrayList(); ArrayList columnNames = new ArrayList(); @@ -5141,7 +5141,7 @@ // Return the common distinct expression // There should be more than 1 destination, with group bys in all of them. private List getCommonDistinctExprs(QB qb, Operator input) { - RowResolver inputRR = opParseCtx.get(input).getRR(); + RowResolver inputRR = opParseCtx.get(input).getRowResolver(); QBParseInfo qbp = qb.getParseInfo(); TreeSet ks = new TreeSet(); @@ -5214,7 +5214,7 @@ ks.addAll(qbp.getClauseNames()); // Pass the entire row - RowResolver inputRR = opParseCtx.get(input).getRR(); + RowResolver inputRR = opParseCtx.get(input).getRowResolver(); RowResolver reduceSinkOutputRowResolver = new RowResolver(); reduceSinkOutputRowResolver.setIsExprResolver(true); ArrayList reduceKeys = new ArrayList(); @@ -5318,7 +5318,7 @@ if (optimizeMultiGroupBy) { curr = createCommonReduceSink(qb, input); - RowResolver currRR = opParseCtx.get(curr).getRR(); + RowResolver currRR = opParseCtx.get(curr).getRowResolver(); // create a forward operator input = putOpInsertMap(OperatorFactory.getAndMakeChild(new ForwardDesc(), new RowSchema(currRR.getColumnInfos()), curr), currRR); @@ -5417,7 +5417,7 @@ // change curr ops row resolver's tab aliases to query alias if it // exists if (qb.getParseInfo().getAlias() != null) { - RowResolver rr = opParseCtx.get(curr).getRR(); + RowResolver rr = opParseCtx.get(curr).getRowResolver(); RowResolver newRR = new RowResolver(); String alias = qb.getParseInfo().getAlias(); for (ColumnInfo colInfo : rr.getColumnInfos()) { @@ -5425,7 +5425,7 @@ String[] tmp = rr.reverseLookup(name); newRR.put(alias, tmp[1], colInfo); } - opParseCtx.get(curr).setRR(newRR); + opParseCtx.get(curr).setRowResolver(newRR); } } } @@ -5445,8 +5445,8 @@ // Currently, the unions are not merged - each union has only 2 parents. So, // a n-way union will lead to (n-1) union operators. // This can be easily merged into 1 union - RowResolver leftRR = opParseCtx.get(leftOp).getRR(); - RowResolver rightRR = opParseCtx.get(rightOp).getRR(); + RowResolver leftRR = opParseCtx.get(leftOp).getRowResolver(); + RowResolver rightRR = opParseCtx.get(rightOp).getRowResolver(); HashMap leftmap = leftRR.getFieldMap(leftalias); HashMap rightmap = rightRR.getFieldMap(rightalias); // make sure the schemas of both sides are the same @@ -5698,7 +5698,7 @@ // Add a mapping from the table scan operator to Table topToTable.put((TableScanOperator) top, tab); } else { - rwsch = opParseCtx.get(top).getRR(); + rwsch = opParseCtx.get(top).getRowResolver(); top.setChildOperators(null); } @@ -5989,7 +5989,7 @@ // RowResolver lvForwardRR = new RowResolver(); - RowResolver source = opParseCtx.get(op).getRR(); + RowResolver source = opParseCtx.get(op).getRowResolver(); for (ColumnInfo col : source.getColumnInfos()) { if(col.getIsVirtualCol() && col.isHiddenVirtualCol()) { continue; @@ -6007,7 +6007,7 @@ // give it the row first. 
// Get the all path by making a select(*). - RowResolver allPathRR = opParseCtx.get(lvForward).getRR(); + RowResolver allPathRR = opParseCtx.get(lvForward).getRowResolver(); //Operator allPath = op; Operator allPath = putOpInsertMap(OperatorFactory.getAndMakeChild( new SelectDesc(true), new RowSchema(allPathRR.getColumnInfos()), @@ -6020,7 +6020,7 @@ for (String udtfAlias : blankQb.getAliases()) { qb.addAlias(udtfAlias); } - RowResolver udtfPathRR = opParseCtx.get(udtfPath).getRR(); + RowResolver udtfPathRR = opParseCtx.get(udtfPath).getRowResolver(); // Merge the two into the lateral view join // The cols of the merged result will be the combination of both the @@ -6522,7 +6522,7 @@ // up with later. Operator sinkOp = genPlan(qb); resultSchema = - convertRowSchemaToViewSchema(opParseCtx.get(sinkOp).getRR()); + convertRowSchemaToViewSchema(opParseCtx.get(sinkOp).getRowResolver()); if (createVwDesc != null) { saveViewDefinition(); @@ -6850,7 +6850,7 @@ * Get the row resolver given an operator. */ public RowResolver getRowResolver(Operator opt) { - return opParseCtx.get(opt).getRR(); + return opParseCtx.get(opt).getRowResolver(); } /** Index: ql/src/java/org/apache/hadoop/hive/ql/parse/OpParseContext.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/OpParseContext.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/OpParseContext.java (working copy) @@ -17,14 +17,15 @@ */ package org.apache.hadoop.hive.ql.parse; - +import java.io.Serializable; /** * Implementation of the Operator Parse Context. It maintains the parse context * that may be needed by an operator. Currently, it only maintains the row * resolver. **/ -public class OpParseContext { +public class OpParseContext implements Serializable { + private static final long serialVersionUID = 1L; private RowResolver rr; // row resolver for the operator public OpParseContext() { @@ -41,7 +42,7 @@ /** * @return the row resolver */ - public RowResolver getRR() { + public RowResolver getRowResolver() { return rr; } @@ -49,7 +50,7 @@ * @param rr * the row resolver to set */ - public void setRR(RowResolver rr) { + public void setRowResolver(RowResolver rr) { this.rr = rr; } } Index: ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java (working copy) @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.parse; +import java.io.Serializable; import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedHashMap; @@ -31,16 +32,16 @@ /** * Implementation of the Row Resolver. - * + * */ -public class RowResolver { +public class RowResolver implements Serializable{ + private static final long serialVersionUID = 1L; + private RowSchema rowSchema; + private HashMap> rslvMap; - private final RowSchema rowSchema; - private final HashMap> rslvMap; + private HashMap invRslvMap; + private Map expressionMap; - private final HashMap invRslvMap; - private final Map expressionMap; - // TODO: Refactor this and do in a more object oriented manner private boolean isExprResolver; @@ -119,11 +120,11 @@ * row resolver and returns the match. It also throws an exception if the * column is found in multiple table aliases. If no match is found a null * values is returned. 
- * + * * This allows us to interpret both select t.c1 type of references and select * c1 kind of refereneces. The later kind are what we call non aliased column * references in the query. - * + * * @param tab_alias * The table alias to match (this is null if the column reference is * non aliased) @@ -222,4 +223,42 @@ } return sb.toString(); } + + public RowSchema getRowSchema() { + return rowSchema; + } + + public HashMap> getRslvMap() { + return rslvMap; + } + + public HashMap getInvRslvMap() { + return invRslvMap; + } + + public Map getExpressionMap() { + return expressionMap; + } + + public void setExprResolver(boolean isExprResolver) { + this.isExprResolver = isExprResolver; + } + + + public void setRowSchema(RowSchema rowSchema) { + this.rowSchema = rowSchema; + } + + public void setRslvMap(HashMap> rslvMap) { + this.rslvMap = rslvMap; + } + + public void setInvRslvMap(HashMap invRslvMap) { + this.invRslvMap = invRslvMap; + } + + public void setExpressionMap(Map expressionMap) { + this.expressionMap = expressionMap; + } + } Index: ql/src/java/org/apache/hadoop/hive/ql/Driver.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/Driver.java (revision 1034599) +++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java (working copy) @@ -24,10 +24,10 @@ import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; -import java.util.HashSet; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; @@ -55,6 +55,7 @@ import org.apache.hadoop.hive.ql.exec.TaskRunner; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.history.HiveHistory.Keys; +import org.apache.hadoop.hive.ql.hooks.HookContext; import org.apache.hadoop.hive.ql.hooks.PostExecute; import org.apache.hadoop.hive.ql.hooks.PreExecute; import org.apache.hadoop.hive.ql.hooks.ReadEntity; @@ -87,7 +88,6 @@ import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; import org.apache.hadoop.hive.serde2.ByteStream; -import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.mapred.ClusterStatus; import org.apache.hadoop.mapred.JobClient; import org.apache.hadoop.mapred.JobConf; @@ -772,10 +772,12 @@ } resStream = null; + //create HookContext + HookContext hookContext = new HookContext(plan,conf); + // Get all the pre execution hooks and execute them. for (PreExecute peh : getPreExecHooks()) { - peh.run(SessionState.get(), plan.getInputs(), plan.getOutputs(), - ShimLoader.getHadoopShims().getUGIForConf(conf)); + peh.run(hookContext); } int jobs = Utilities.getMRTasks(plan.getRootTasks()).size(); @@ -823,15 +825,34 @@ int exitVal = tskRes.getExitVal(); if (exitVal != 0) { - // TODO: This error messaging is not very informative. Fix that. 
-          errorMessage = "FAILED: Execution Error, return code " + exitVal
+          Task<? extends Serializable> backupTask = driverCxt.getBackupTask(tsk);
+          if (backupTask != null) {
+            errorMessage = "FAILED: Execution Error, return code " + exitVal
                 + " from " + tsk.getClass().getName();
-          SQLState = "08S01";
-          console.printError(errorMessage);
-          if (running.size() != 0) {
-            taskCleanup();
+            console.printInfo(errorMessage);
+            System.out.println(errorMessage);
+
+            errorMessage = "ATTEMPT: Execute BackupTask: " + backupTask.getClass().getName();
+            console.printInfo(errorMessage);
+            System.out.println(errorMessage);
+
+            // add backup task to runnable
+            if (DriverContext.isLaunchable(backupTask)) {
+              driverCxt.addToRunnable(backupTask);
+            }
+            continue;
+
+          } else {
+            // TODO: This error messaging is not very informative. Fix that.
+            errorMessage = "FAILED: Execution Error, return code " + exitVal
+                + " from " + tsk.getClass().getName();
+            SQLState = "08S01";
+            console.printError(errorMessage);
+            if (running.size() != 0) {
+              taskCleanup();
+            }
+            return 9;
           }
-          return 9;
         }

         if (SessionState.get() != null) {
@@ -869,9 +890,7 @@

       // Get all the post execution hooks and execute them.
       for (PostExecute peh : getPostExecHooks()) {
-        peh.run(SessionState.get(), plan.getInputs(), plan.getOutputs(),
-            (SessionState.get() != null ? SessionState.get().getLineageState().getLineageInfo() : null),
-            ShimLoader.getHadoopShims().getUGIForConf(conf));
+        peh.run(hookContext);
       }

       if (SessionState.get() != null) {
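Taken together with the ConditionalResolver changes earlier in the patch, the new failure path reads: if a finished task returned a non-zero exit code and a backup task was registered, print the ATTEMPT message and queue the backup instead of returning 9. A minimal sketch of that retry-with-fallback loop follows; only getBackupTask and addToRunnable mirror names from the patch, everything else (ToyDriver, Job) is illustrative.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.Map;

// Toy model of Driver.execute()'s backup-task fallback path.
class ToyDriver {
  interface Job {
    int run();
    String name();
  }

  private final Deque<Job> runnable = new ArrayDeque<Job>();
  private final Map<Job, Job> backups = new HashMap<Job, Job>();

  void submit(Job primary, Job backup) {
    runnable.add(primary);
    if (backup != null) {
      backups.put(primary, backup); // mirrors resolver.getBackupTask(...)
    }
  }

  int execute() {
    while (!runnable.isEmpty()) {
      Job job = runnable.poll();
      int exitVal = job.run();
      if (exitVal != 0) {
        Job backup = backups.get(job);
        if (backup != null) {
          System.out.println("ATTEMPT: Execute BackupTask: " + backup.name());
          runnable.add(backup); // mirrors driverCxt.addToRunnable(backupTask)
          continue;             // do not fail the query yet
        }
        return 9;               // no fallback: propagate the failure
      }
    }
    return 0;
  }
}

In the patch this is what makes hive.auto.convert.join safe to enable: the optimistically chosen map-join task can fail (for example, when the small table turns out not to fit in memory) and the query still completes via the common-join backup.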