diff --git itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestMiniDruidCliDriver.java itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestMiniDruidLlapCliDriver.java
similarity index 94%
rename from itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestMiniDruidCliDriver.java
rename to itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestMiniDruidLlapCliDriver.java
index fa75d65219..692879338e 100644
--- itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestMiniDruidCliDriver.java
+++ itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestMiniDruidLlapCliDriver.java
@@ -31,7 +31,7 @@
 import java.util.List;
 
 @RunWith(Parameterized.class)
-public class TestMiniDruidCliDriver {
+public class TestMiniDruidLlapCliDriver {
 
   static CliAdapter adapter = new CliConfigs.MiniDruidCliConfig().getCliAdapter();
 
@@ -49,7 +49,7 @@
   private String name;
   private File qfile;
 
-  public TestMiniDruidCliDriver(String name, File qfile) {
+  public TestMiniDruidLlapCliDriver(String name, File qfile) {
     this.name = name;
     this.qfile = qfile;
   }
diff --git itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestMiniDruidLlapLocalCliDriver.java itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestMiniDruidLlapLocalCliDriver.java
new file mode 100644
index 0000000000..7823e04310
--- /dev/null
+++ itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestMiniDruidLlapLocalCliDriver.java
@@ -0,0 +1,46 @@
+package org.apache.hadoop.hive.cli;
+
+import org.apache.hadoop.hive.cli.control.CliAdapter;
+import org.apache.hadoop.hive.cli.control.CliConfigs;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestRule;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.File;
+import java.util.List;
+
+/**
+ * This driver is used only for dev debugging, if needed.
+ */
+@RunWith(Parameterized.class)
+public class TestMiniDruidLlapLocalCliDriver {
+  static CliAdapter adapter = new CliConfigs.MiniDruidLlapLocalCliDriver().getCliAdapter();
+
+  @Parameterized.Parameters(name = "{0}")
+  public static List getParameters() throws Exception {
+    return adapter.getParameters();
+  }
+
+  @ClassRule
+  public static TestRule cliClassRule = adapter.buildClassRule();
+
+  @Rule
+  public TestRule cliTestRule = adapter.buildTestRule();
+
+  private String name;
+  private File qfile;
+
+  public TestMiniDruidLlapLocalCliDriver(String name, File qfile) {
+    this.name = name;
+    this.qfile = qfile;
+  }
+
+  @Test
+  public void testCliDriver() throws Exception {
+    adapter.runTest(name, qfile);
+  }
+
+}
\ No newline at end of file
diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
index d07599e96b..d18fed21e9 100644
--- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
+++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
@@ -708,4 +708,27 @@ private void setHiveConfDir(MiniClusterType clusterType) {
     }
   }
+
+  public static class MiniDruidLlapLocalCliDriver extends AbstractCliConfig {
+    public MiniDruidLlapLocalCliDriver() {
+      super(CoreCliDriver.class);
+      try {
+        setQueryDir("ql/src/test/queries/clientpositive");
+
+        includesFrom(testConfigProps, "druid.query.files");
+
+        setResultsDir("ql/src/test/results/clientpositive/druid");
+        setLogDir("itests/qtest/target/tmp/log");
+
+        setInitScript("q_test_druid_init.sql");
+        setCleanupScript("q_test_cleanup_druid.sql");
+        setHiveConfDir("data/conf/llap");
+        setClusterType(MiniClusterType.druidLocal);
+        setMetastoreType(MetastoreType.sql);
+        setFsType(QTestUtil.FsType.local);
+      } catch (Exception e) {
+        throw new RuntimeException("can't construct cliconfig", e);
+      }
+    }
+  }
 
 }
diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 98aae5c118..4ed062e09e 100644
--- itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -422,6 +422,7 @@ private void createRemoteDirs() {
     llap_local(CoreClusterType.TEZ, FsType.local),
     none(CoreClusterType.MR, FsType.local),
     druid(CoreClusterType.TEZ, FsType.hdfs),
+    druidLocal(CoreClusterType.TEZ, FsType.local),
     druidKafka(CoreClusterType.TEZ, FsType.hdfs),
     kafka(CoreClusterType.TEZ, FsType.hdfs);
 
@@ -460,6 +461,8 @@ public static MiniClusterType valueForString(String type) {
       return llap_local;
     } else if (type.equals("druid")) {
       return druid;
+    } else if (type.equals("druidLocal")) {
+      return druidLocal;
     } else if (type.equals("druid-kafka")) {
       return druidKafka;
     }
@@ -632,7 +635,9 @@ private void setupMiniCluster(HadoopShims shims, String confDir) throws
 
     String uriString = fs.getUri().toString();
 
-    if (clusterType == MiniClusterType.druid || clusterType == MiniClusterType.druidKafka) {
+    if (clusterType == MiniClusterType.druid
+        || clusterType == MiniClusterType.druidKafka
+        || clusterType == MiniClusterType.druidLocal) {
       final String tempDir = System.getProperty("test.tmp.dir");
       druidCluster = new MiniDruidCluster("mini-druid",
           logDir,
@@ -672,15 +677,28 @@ private void setupMiniCluster(HadoopShims shims, String confDir) throws
             + "/tez-site.xml"));
       }
       int numTrackers = 2;
-      if (EnumSet.of(MiniClusterType.llap, MiniClusterType.llap_local).contains(clusterType)) {
+      if (EnumSet.of(
+          MiniClusterType.llap,
+          MiniClusterType.llap_local,
+          MiniClusterType.druidLocal,
+          MiniClusterType.druid
+      ).contains(clusterType)) {
         llapCluster = LlapItUtils.startAndGetMiniLlapCluster(conf, setup.zooKeeperCluster, confDir);
       } else {
       }
-      if (EnumSet.of(MiniClusterType.llap_local, MiniClusterType.tez_local).contains(clusterType)) {
-        mr = shims.getLocalMiniTezCluster(conf, clusterType == MiniClusterType.llap_local);
+      if (EnumSet.of(MiniClusterType.llap_local, MiniClusterType.tez_local, MiniClusterType.druidLocal)
+          .contains(clusterType)) {
+        mr = shims.getLocalMiniTezCluster(conf,
+            clusterType == MiniClusterType.llap_local
+                || clusterType == MiniClusterType.druidLocal
+        );
       } else {
-        mr = shims.getMiniTezCluster(conf, numTrackers, uriString,
-            EnumSet.of(MiniClusterType.llap, MiniClusterType.llap_local).contains(clusterType));
+        mr = shims.getMiniTezCluster(
+            conf,
+            numTrackers,
+            uriString,
+            EnumSet.of(MiniClusterType.llap, MiniClusterType.llap_local, MiniClusterType.druid).contains(clusterType)
+        );
       }
     } else if (clusterType == MiniClusterType.miniSparkOnYarn) {
       mr = shims.getMiniSparkCluster(conf, 2, uriString, 1);
diff --git ql/src/test/results/clientpositive/druid/druid_basic2.q.out ql/src/test/results/clientpositive/druid/druid_basic2.q.out
index 88916b9d6c..444d91db32 100644
--- ql/src/test/results/clientpositive/druid/druid_basic2.q.out
+++ ql/src/test/results/clientpositive/druid/druid_basic2.q.out
@@ -236,7 +236,8 @@ STAGE PLANS:
                     tag: 0
                     value expressions: _col0 (type: string)
                     auto parallelism: true
-            Execution mode: vectorized
+            Execution mode:
vectorized, llap + LLAP IO: no inputs Path -> Alias: hdfs://### HDFS PATH ### [druid_table_1_n2] Path -> Partition: @@ -325,7 +326,8 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE tag: 1 auto parallelism: true - Execution mode: vectorized + Execution mode: vectorized, llap + LLAP IO: no inputs Path -> Alias: hdfs://### HDFS PATH ### [druid_table_1_n2] Path -> Partition: @@ -396,6 +398,7 @@ STAGE PLANS: Truncated Path -> Alias: /druid_table_1_n2 [druid_table_1_n2] Reducer 2 + Execution mode: llap Needs Tagging: false Reduce Operator Tree: Merge Join Operator @@ -501,7 +504,8 @@ STAGE PLANS: tag: 0 value expressions: _col0 (type: string) auto parallelism: false - Execution mode: vectorized + Execution mode: vectorized, llap + LLAP IO: no inputs Path -> Alias: hdfs://### HDFS PATH ### [druid_table_1_n2] Path -> Partition: @@ -588,7 +592,8 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE tag: 1 auto parallelism: false - Execution mode: vectorized + Execution mode: vectorized, llap + LLAP IO: no inputs Path -> Alias: hdfs://### HDFS PATH ### [druid_table_1_n2] Path -> Partition: @@ -659,6 +664,7 @@ STAGE PLANS: Truncated Path -> Alias: /druid_table_1_n2 [druid_table_1_n2] Reducer 2 + Execution mode: llap Needs Tagging: false Reduce Operator Tree: Merge Join Operator @@ -926,6 +932,8 @@ STAGE PLANS: tag: -1 value expressions: _col3 (type: float), _col4 (type: double) auto parallelism: true + Execution mode: llap + LLAP IO: no inputs Path -> Alias: hdfs://### HDFS PATH ### [druid_table_1_n2] Path -> Partition: @@ -988,6 +996,7 @@ STAGE PLANS: Truncated Path -> Alias: /druid_table_1_n2 [druid_table_1_n2] Reducer 2 + Execution mode: llap Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -1011,6 +1020,7 @@ STAGE PLANS: value expressions: _col0 (type: string), _col1 (type: timestamp with local time zone), _col3 (type: double) auto parallelism: false Reducer 3 + Execution mode: llap Needs Tagging: false Reduce Operator Tree: Select Operator diff --git ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out index bd56df5088..cb2ae1d6a2 100644 --- ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out +++ ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out @@ -137,7 +137,10 @@ STAGE PLANS: Map-reduce partition columns: __time_granularity (type: timestamp), __druid_extra_partition_key (type: bigint) Statistics: Num rows: 12288 Data size: 2601650 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: timestamp with local time zone), _col1 (type: string), _col2 (type: string), _col3 (type: double), _col4 (type: float), _col5 (type: tinyint), _col6 (type: smallint), _col7 (type: int), _col8 (type: bigint), _col9 (type: boolean), _col10 (type: boolean) + Execution mode: llap + LLAP IO: all inputs Reducer 2 + Execution mode: llap Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: timestamp with local time zone), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: double), VALUE._col4 (type: float), VALUE._col5 (type: tinyint), VALUE._col6 (type: smallint), VALUE._col7 (type: int), VALUE._col8 (type: bigint), VALUE._col9 (type: boolean), VALUE._col10 (type: boolean), KEY.__time_granularity (type: timestamp), KEY.__druid_extra_partition_key (type: bigint) @@ -373,7 +376,10 @@ STAGE PLANS: Map-reduce 
partition columns: __time_granularity (type: timestamp), __druid_extra_partition_key (type: bigint) Statistics: Num rows: 12288 Data size: 2601650 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: timestamp with local time zone), _col1 (type: string), _col2 (type: string), _col3 (type: double), _col4 (type: float), _col5 (type: tinyint), _col6 (type: smallint), _col7 (type: int), _col8 (type: bigint), _col9 (type: boolean), _col10 (type: boolean) + Execution mode: llap + LLAP IO: all inputs Reducer 2 + Execution mode: llap Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: timestamp with local time zone), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: double), VALUE._col4 (type: float), VALUE._col5 (type: tinyint), VALUE._col6 (type: smallint), VALUE._col7 (type: int), VALUE._col8 (type: bigint), VALUE._col9 (type: boolean), VALUE._col10 (type: boolean), KEY.__time_granularity (type: timestamp), KEY.__druid_extra_partition_key (type: bigint) @@ -511,7 +517,10 @@ STAGE PLANS: Map-reduce partition columns: __time_granularity (type: timestamp), __druid_extra_partition_key (type: bigint) Statistics: Num rows: 12288 Data size: 2601650 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: timestamp with local time zone), _col1 (type: string), _col2 (type: string), _col3 (type: double), _col4 (type: float), _col5 (type: tinyint), _col6 (type: smallint), _col7 (type: int), _col8 (type: bigint), _col9 (type: boolean), _col10 (type: boolean) + Execution mode: llap + LLAP IO: all inputs Reducer 2 + Execution mode: llap Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: timestamp with local time zone), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: double), VALUE._col4 (type: float), VALUE._col5 (type: tinyint), VALUE._col6 (type: smallint), VALUE._col7 (type: int), VALUE._col8 (type: bigint), VALUE._col9 (type: boolean), VALUE._col10 (type: boolean), KEY.__time_granularity (type: timestamp), KEY.__druid_extra_partition_key (type: bigint) diff --git ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out index e322d8f91f..610558ad49 100644 --- ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out +++ ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out @@ -260,6 +260,8 @@ STAGE PLANS: tag: -1 value expressions: _col0 (type: bigint) auto parallelism: false + Execution mode: llap + LLAP IO: no inputs Path -> Alias: hdfs://### HDFS PATH ### [druid_table_n0] Path -> Partition: @@ -334,6 +336,7 @@ STAGE PLANS: Truncated Path -> Alias: /druid_table_n0 [druid_table_n0] Reducer 2 + Execution mode: llap Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -436,7 +439,10 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: timestamp with local time zone), _col1 (type: string) Statistics: Num rows: 9173 Data size: 3625856 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint), _col3 (type: double) + Execution mode: llap + LLAP IO: no inputs Reducer 2 + Execution mode: llap Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0), sum(VALUE._col1) @@ -504,7 +510,10 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: timestamp with local time zone), _col1 (type: string) Statistics: Num rows: 9173 Data size: 2091840 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint), 
_col3 (type: double) + Execution mode: llap + LLAP IO: no inputs Reducer 2 + Execution mode: llap Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0), sum(VALUE._col1) @@ -572,7 +581,10 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: timestamp with local time zone), _col1 (type: string) Statistics: Num rows: 9173 Data size: 3625856 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint), _col3 (type: double) + Execution mode: llap + LLAP IO: no inputs Reducer 2 + Execution mode: llap Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0), sum(VALUE._col1) @@ -640,7 +652,10 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: timestamp with local time zone), _col1 (type: string) Statistics: Num rows: 9173 Data size: 3625856 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint), _col3 (type: double) + Execution mode: llap + LLAP IO: no inputs Reducer 2 + Execution mode: llap Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0), sum(VALUE._col1) @@ -705,7 +720,10 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: bigint) + Execution mode: llap + LLAP IO: no inputs Reducer 2 + Execution mode: llap Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0) @@ -765,7 +783,10 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 208 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: bigint), _col1 (type: double) + Execution mode: llap + LLAP IO: no inputs Reducer 2 + Execution mode: llap Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0), sum(VALUE._col1) @@ -825,7 +846,10 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 216 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: bigint), _col1 (type: double), _col2 (type: int) + Execution mode: llap + LLAP IO: no inputs Reducer 2 + Execution mode: llap Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0), sum(VALUE._col1), min(VALUE._col2) @@ -1256,7 +1280,10 @@ STAGE PLANS: sort order: ++ Statistics: Num rows: 9173 Data size: 976192 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.1 + Execution mode: llap + LLAP IO: no inputs Reducer 2 + Execution mode: llap Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: date), KEY.reducesinkkey1 (type: date) diff --git ql/src/test/results/clientpositive/druid/druidmini_joins.q.out ql/src/test/results/clientpositive/druid/druidmini_joins.q.out index 014c7b5406..de0f2d8f21 100644 --- ql/src/test/results/clientpositive/druid/druidmini_joins.q.out +++ ql/src/test/results/clientpositive/druid/druidmini_joins.q.out @@ -117,6 +117,8 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE + Execution mode: llap + LLAP IO: no inputs Map 4 Map Operator Tree: TableScan @@ -133,7 +135,10 @@ STAGE PLANS: Map-reduce partition columns: username (type: string) Statistics: Num rows: 6 Data size: 1152 Basic stats: COMPLETE Column stats: NONE value expressions: double1 (type: double) + Execution mode: llap + LLAP IO: no inputs Reducer 2 + Execution mode: llap Reduce Operator Tree: Merge Join Operator condition map: @@ -156,6 +161,7 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 1267 Basic stats: COMPLETE Column stats: NONE value expressions: 
_col1 (type: double) Reducer 3 + Execution mode: llap Reduce Operator Tree: Group By Operator aggregations: sum(VALUE._col0) @@ -220,5 +226,5 @@ POSTHOOK: Output: hdfs://### HDFS PATH ### alfred 10.300000190734863 bob 3.140000104904175 bonnie 0.0 -calvin 0.0 charlie 25.600000381469727 +calvin 0.0 diff --git ql/src/test/results/clientpositive/druid/druidmini_mv.q.out ql/src/test/results/clientpositive/druid/druidmini_mv.q.out index 383f2dc458..12de110700 100644 --- ql/src/test/results/clientpositive/druid/druidmini_mv.q.out +++ ql/src/test/results/clientpositive/druid/druidmini_mv.q.out @@ -230,6 +230,8 @@ STAGE PLANS: sort order: Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: double) + Execution mode: llap + LLAP IO: may be used (ACID table) Map 3 Map Operator Tree: TableScan @@ -247,7 +249,10 @@ STAGE PLANS: sort order: Statistics: Num rows: 6 Data size: 96 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: double) + Execution mode: llap + LLAP IO: may be used (ACID table) Reducer 2 + Execution mode: llap Reduce Operator Tree: Merge Join Operator condition map: @@ -355,6 +360,8 @@ STAGE PLANS: sort order: Statistics: Num rows: 7 Data size: 84 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: double) + Execution mode: llap + LLAP IO: may be used (ACID table) Map 3 Map Operator Tree: TableScan @@ -372,7 +379,10 @@ STAGE PLANS: sort order: Statistics: Num rows: 7 Data size: 112 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: double) + Execution mode: llap + LLAP IO: may be used (ACID table) Reducer 2 + Execution mode: llap Reduce Operator Tree: Merge Join Operator condition map: @@ -480,7 +490,10 @@ STAGE PLANS: Map-reduce partition columns: __time_granularity (type: timestamp) Statistics: Num rows: 7 Data size: 5124 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: timestamp with local time zone), _col1 (type: int), _col2 (type: varchar(256)), _col3 (type: double), _col4 (type: varchar(256)) + Execution mode: llap + LLAP IO: may be used (ACID table) Reducer 2 + Execution mode: llap Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: timestamp with local time zone), VALUE._col1 (type: int), VALUE._col2 (type: varchar(256)), VALUE._col3 (type: double), VALUE._col4 (type: varchar(256)), KEY.__time_granularity (type: timestamp) @@ -567,6 +580,8 @@ STAGE PLANS: sort order: Statistics: Num rows: 7 Data size: 112 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: double) + Execution mode: llap + LLAP IO: may be used (ACID table) Map 3 Map Operator Tree: TableScan @@ -581,7 +596,10 @@ STAGE PLANS: sort order: Statistics: Num rows: 3 Data size: 36 Basic stats: COMPLETE Column stats: NONE value expressions: a (type: int), c (type: double) + Execution mode: llap + LLAP IO: no inputs Reducer 2 + Execution mode: llap Reduce Operator Tree: Merge Join Operator condition map: diff --git ql/src/test/results/clientpositive/druid/druidmini_test1.q.out ql/src/test/results/clientpositive/druid/druidmini_test1.q.out index 7f6c6b0aa3..f93841f11c 100644 --- ql/src/test/results/clientpositive/druid/druidmini_test1.q.out +++ ql/src/test/results/clientpositive/druid/druidmini_test1.q.out @@ -316,7 +316,10 @@ STAGE PLANS: sort order: + Statistics: Num rows: 9173 Data size: 348640 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.1 + Execution mode: llap + LLAP IO: no inputs Reducer 2 + Execution mode: llap 
Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: timestamp with local time zone) @@ -400,7 +403,10 @@ STAGE PLANS: sort order: + Statistics: Num rows: 9173 Data size: 348640 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.1 + Execution mode: llap + LLAP IO: no inputs Reducer 2 + Execution mode: llap Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: timestamp with local time zone) @@ -486,7 +492,10 @@ STAGE PLANS: sort order: + Statistics: Num rows: 9173 Data size: 348640 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.1 + Execution mode: llap + LLAP IO: no inputs Reducer 2 + Execution mode: llap Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: timestamp with local time zone) @@ -574,7 +583,10 @@ STAGE PLANS: sort order: + Statistics: Num rows: 9173 Data size: 348640 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.1 + Execution mode: llap + LLAP IO: no inputs Reducer 2 + Execution mode: llap Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: timestamp with local time zone) @@ -662,7 +674,10 @@ STAGE PLANS: sort order: + Statistics: Num rows: 9173 Data size: 348640 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.1 + Execution mode: llap + LLAP IO: no inputs Reducer 2 + Execution mode: llap Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: timestamp with local time zone) @@ -750,7 +765,10 @@ STAGE PLANS: sort order: + Statistics: Num rows: 9173 Data size: 348640 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.1 + Execution mode: llap + LLAP IO: no inputs Reducer 2 + Execution mode: llap Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: timestamp with local time zone)
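
Note: TestMiniDruidLlapLocalCliDriver added above is meant only for local dev debugging and is driven by the same parameterized qtest machinery as the other CLI drivers. The launcher below is not part of the patch; it is a hypothetical sketch of how that driver could be started from a plain main() via JUnit, assuming it runs with the itests test classpath and that query selection is handled the usual qtest way (for example by setting the standard qfile property, such as druidmini_test1.q, before launching).

// Hypothetical dev-only launcher, not included in this patch. Assumes the
// itests test classpath and the usual qtest system properties are in place.
package org.apache.hadoop.hive.cli;

import org.junit.runner.JUnitCore;
import org.junit.runner.Result;
import org.junit.runner.notification.Failure;

public class DruidLlapLocalDebugMain {
  public static void main(String[] args) {
    // Runs every q file selected for the driver (druid.query.files by default).
    Result result = JUnitCore.runClasses(TestMiniDruidLlapLocalCliDriver.class);
    for (Failure failure : result.getFailures()) {
      System.out.println(failure);
    }
    System.out.println("run=" + result.getRunCount() + " failed=" + result.getFailureCount());
  }
}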