diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
index 5a48d0f0dc..3e2a1711f6 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
@@ -90,7 +90,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.druid.serde.HiveDruidSerializationModule;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
@@ -195,9 +194,6 @@
     JSON_MAPPER.setInjectableValues(injectableValues);
     SMILE_MAPPER.setInjectableValues(injectableValues);
-    HiveDruidSerializationModule hiveDruidSerializationModule = new HiveDruidSerializationModule();
-    JSON_MAPPER.registerModule(hiveDruidSerializationModule);
-    SMILE_MAPPER.registerModule(hiveDruidSerializationModule);
     // Register the shard sub type to be used by the mapper
     JSON_MAPPER.registerSubtypes(new NamedType(LinearShardSpec.class, "linear"));
     JSON_MAPPER.registerSubtypes(new NamedType(NumberedShardSpec.class, "numbered"));
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
index f0e12a22fe..dc8ce1fae4 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
@@ -99,7 +99,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import static org.apache.hadoop.hive.druid.serde.DruidSerDeUtils.TIMESTAMP_FORMAT;
 import static org.joda.time.format.ISODateTimeFormat.dateOptionalTimeParser;
 
 /**
@@ -395,14 +394,15 @@ protected SegmentAnalysis submitMetadataRequest(String address, SegmentMetadataQ
       }
       switch (types[i].getPrimitiveCategory()) {
         case TIMESTAMP:
+          final TimestampWritableV2 timestampWritable;
           if (value instanceof Number) {
-            output.add(new TimestampWritableV2(Timestamp.valueOf(
-                ZonedDateTime.ofInstant(Instant.ofEpochMilli(((Number) value).longValue()), tsTZTypeInfo.timeZone())
-                    .format(DateTimeFormatter.ofPattern(TIMESTAMP_FORMAT)))));
+            timestampWritable = new TimestampWritableV2(
+                Timestamp.ofEpochMilli(((Number) value).longValue()));
           } else {
-            output.add(new TimestampWritableV2(Timestamp.valueOf((String) value)));
+            timestampWritable = new TimestampWritableV2(
+                Timestamp.valueOf((String) value));
           }
-
+          output.add(timestampWritable);
           break;
         case TIMESTAMPLOCALTZ:
           final long numberOfMillis;
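For context on the DruidSerDe hunk above: Druid hands the time column back as epoch milliseconds, i.e. a UTC instant. The removed code rendered that instant as wall-clock text in the table's time zone and re-parsed the string; the new code builds the Timestamp straight from the raw millis. A minimal standalone sketch of the two conversion paths, using only java.time — the zone and pattern below are illustrative, not taken from the patch:

    import java.time.Instant;
    import java.time.ZoneId;
    import java.time.ZonedDateTime;
    import java.time.format.DateTimeFormatter;

    // Sketch: why the patch replaces format-then-parse with a direct
    // epoch-millis conversion. Names and the zone are illustrative only.
    public class EpochConversionSketch {
      public static void main(String[] args) {
        long druidTime = 0L; // Druid event time: epoch millis, a UTC instant

        // Old path (removed): render the instant in a chosen time zone, then
        // re-parse the string as a zone-less timestamp. The wall-clock text
        // now depends on the zone that was applied.
        String wallClock = ZonedDateTime
            .ofInstant(Instant.ofEpochMilli(druidTime), ZoneId.of("America/Los_Angeles"))
            .format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
        System.out.println(wallClock); // 1969-12-31 16:00:00

        // New path: keep the raw instant; Hive's Timestamp.ofEpochMilli in
        // storage-api does the equivalent of this, with no zone applied.
        System.out.println(Instant.ofEpochMilli(druidTime)); // 1970-01-01T00:00:00Z
      }
    }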
"DOUBLE"; protected static final String LONG_TYPE = "LONG"; diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java deleted file mode 100644 index 8a110ae6e9..0000000000 --- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.druid.serde; - -import io.druid.java.util.common.granularity.PeriodGranularity; -import io.druid.query.spec.LegacySegmentSpec; - -import com.fasterxml.jackson.core.util.VersionUtil; -import com.fasterxml.jackson.databind.module.SimpleModule; - -import org.joda.time.Interval; - -/** - * This class is used to define/override any serde behavior for classes from druid. - * Currently it is used to override the default behavior when serializing PeriodGranularity to include user timezone. - */ -public class HiveDruidSerializationModule extends SimpleModule { - private static final String NAME = "HiveDruidSerializationModule"; - private static final VersionUtil VERSION_UTIL = new VersionUtil() {}; - - public HiveDruidSerializationModule() { - super(NAME, VERSION_UTIL.version()); - addSerializer(PeriodGranularity.class, new PeriodGranularitySerializer()); - } -} \ No newline at end of file diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/PeriodGranularitySerializer.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/PeriodGranularitySerializer.java deleted file mode 100644 index 10f91729e8..0000000000 --- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/PeriodGranularitySerializer.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-package org.apache.hadoop.hive.druid.serde;
-
-import io.druid.java.util.common.granularity.PeriodGranularity;
-
-import com.fasterxml.jackson.core.JsonGenerator;
-import com.fasterxml.jackson.core.JsonProcessingException;
-import com.fasterxml.jackson.databind.JsonSerializer;
-import com.fasterxml.jackson.databind.SerializerProvider;
-import com.fasterxml.jackson.databind.jsontype.TypeSerializer;
-
-import org.joda.time.DateTimeZone;
-
-import java.io.IOException;
-
-public class PeriodGranularitySerializer extends JsonSerializer<PeriodGranularity> {
-
-  @Override
-  public void serialize(PeriodGranularity granularity, JsonGenerator jsonGenerator,
-      SerializerProvider serializerProvider) throws IOException, JsonProcessingException {
-    // Set timezone based on user timezone if origin is not already set
-    // as it is default Hive time semantics to consider user timezone.
-    PeriodGranularity granularityWithUserTimezone = new PeriodGranularity(
-        granularity.getPeriod(),
-        granularity.getOrigin(),
-        DateTimeZone.getDefault()
-    );
-    granularityWithUserTimezone.serialize(jsonGenerator, serializerProvider);
-  }
-
-  @Override
-  public void serializeWithType(PeriodGranularity value, JsonGenerator gen,
-      SerializerProvider serializers, TypeSerializer typeSer) throws IOException {
-    serialize(value, gen, serializers);
-  }
-}
-
-
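The two files deleted above carried Hive's custom Jackson wiring for Druid's PeriodGranularity: a SimpleModule registering a serializer that injected the user time zone on the way out. For reference, a minimal self-contained sketch of that mechanism — the Granularity class below is a toy stand-in for Druid's PeriodGranularity, not the real type:

    import com.fasterxml.jackson.core.JsonGenerator;
    import com.fasterxml.jackson.databind.JsonSerializer;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.SerializerProvider;
    import com.fasterxml.jackson.databind.module.SimpleModule;

    import java.io.IOException;
    import java.util.TimeZone;

    // Toy stand-in for Druid's PeriodGranularity, so the sketch is self-contained.
    class Granularity {
      final String period;
      final String timeZone;
      Granularity(String period, String timeZone) {
        this.period = period;
        this.timeZone = timeZone;
      }
    }

    public class SerializerModuleSketch {
      public static void main(String[] args) throws IOException {
        SimpleModule module = new SimpleModule("sketch");
        // Rewrite the value on the way out, as the deleted serializer did
        // when it injected the user time zone.
        module.addSerializer(Granularity.class, new JsonSerializer<Granularity>() {
          @Override
          public void serialize(Granularity g, JsonGenerator gen, SerializerProvider sp)
              throws IOException {
            gen.writeStartObject();
            gen.writeStringField("period", g.period);
            gen.writeStringField("timeZone",
                g.timeZone != null ? g.timeZone : TimeZone.getDefault().getID());
            gen.writeEndObject();
          }
        });
        ObjectMapper mapper = new ObjectMapper().registerModule(module);
        System.out.println(mapper.writeValueAsString(new Granularity("P1D", null)));
      }
    }

With the serde no longer applying time-zone adjustments at serialization time, this override becomes unnecessary and the module is removed from both mappers.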
+ "could not resolve which one is the right column"); } + timestampType = ((PrimitiveTypeInfo) columnDesc.getTypeInfo()).getPrimitiveCategory(); timestampPos = i; } } @@ -327,8 +330,8 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } - // Timestamp column type in Druid is timestamp with local time-zone, as it represents - // a specific instant in time. Thus, we have this value and we need to extract the + // Timestamp column type in Druid is either timestamp or timestamp with local time-zone, i.e., + // a specific instant in time. Thus, for the latest, we have this value and we need to extract the // granularity to split the data when we are storing it in Druid. However, Druid stores // the data in UTC. Thus, we need to apply the following logic on the data to extract // the granularity correctly: @@ -341,18 +344,20 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // #1 - Read the column value ExprNodeDesc expr = new ExprNodeColumnDesc(parentCols.get(timestampPos)); - // #2 - UTC epoch for instant - ExprNodeGenericFuncDesc f1 = new ExprNodeGenericFuncDesc( - TypeInfoFactory.longTypeInfo, new GenericUDFEpochMilli(), Lists.newArrayList(expr)); - // #3 - Cast to timestamp - ExprNodeGenericFuncDesc f2 = new ExprNodeGenericFuncDesc( - TypeInfoFactory.timestampTypeInfo, new GenericUDFTimestamp(), Lists.newArrayList(f1)); + if (timestampType == PrimitiveCategory.TIMESTAMPLOCALTZ) { + // #2 - UTC epoch for instant + expr = new ExprNodeGenericFuncDesc( + TypeInfoFactory.longTypeInfo, new GenericUDFEpochMilli(), Lists.newArrayList(expr)); + // #3 - Cast to timestamp + expr = new ExprNodeGenericFuncDesc( + TypeInfoFactory.timestampTypeInfo, new GenericUDFTimestamp(), Lists.newArrayList(expr)); + } // #4 - We apply the granularity function - ExprNodeGenericFuncDesc f3 = new ExprNodeGenericFuncDesc( + expr = new ExprNodeGenericFuncDesc( TypeInfoFactory.timestampTypeInfo, new GenericUDFBridge(udfName, false, udfClass.getName()), - Lists.newArrayList(f2)); - descs.add(f3); + Lists.newArrayList(expr)); + descs.add(expr); colNames.add(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME); // Add granularity to the row schema final ColumnInfo ci = new ColumnInfo(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME, TypeInfoFactory.timestampTypeInfo, diff --git a/ql/src/test/queries/clientpositive/druidmini_test_ts.q b/ql/src/test/queries/clientpositive/druidmini_test_ts.q new file mode 100644 index 0000000000..9e45ae601e --- /dev/null +++ b/ql/src/test/queries/clientpositive/druidmini_test_ts.q @@ -0,0 +1,64 @@ +--! 
diff --git a/ql/src/test/queries/clientpositive/druidmini_test_ts.q b/ql/src/test/queries/clientpositive/druidmini_test_ts.q
new file mode 100644
index 0000000000..9e45ae601e
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/druidmini_test_ts.q
@@ -0,0 +1,64 @@
+--! qt:dataset:alltypesorc
+CREATE TABLE druid_table_test_ts
+STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
+TBLPROPERTIES ("druid.segment.granularity" = "HOUR", "druid.query.granularity" = "MINUTE")
+AS
+SELECT `ctimestamp1` as `__time`,
+       cstring1,
+       cstring2,
+       cdouble,
+       cfloat,
+       ctinyint,
+       csmallint,
+       cint,
+       cbigint,
+       cboolean1,
+       cboolean2
+  FROM alltypesorc where ctimestamp1 IS NOT NULL;
+
+-- Time Series Query
+SELECT count(*) FROM druid_table_test_ts;
+
+SELECT floor_year(`__time`), SUM(cfloat), SUM(cdouble), SUM(ctinyint), SUM(csmallint),SUM(cint), SUM(cbigint)
+FROM druid_table_test_ts GROUP BY floor_year(`__time`);
+
+SELECT floor_year(`__time`), MIN(cfloat), MIN(cdouble), MIN(ctinyint), MIN(csmallint),MIN(cint), MIN(cbigint)
+FROM druid_table_test_ts GROUP BY floor_year(`__time`);
+
+SELECT floor_year(`__time`), MAX(cfloat), MAX(cdouble), MAX(ctinyint), MAX(csmallint),MAX(cint), MAX(cbigint)
+FROM druid_table_test_ts GROUP BY floor_year(`__time`);
+
+
+-- Group By
+
+SELECT cstring1, SUM(cdouble) as s FROM druid_table_test_ts GROUP BY cstring1 ORDER BY s ASC LIMIT 10;
+
+SELECT cstring2, MAX(cdouble) FROM druid_table_test_ts GROUP BY cstring2 ORDER BY cstring2 ASC LIMIT 10;
+
+
+-- TIME STUFF
+
+SELECT `__time`
+FROM druid_table_test_ts ORDER BY `__time` ASC LIMIT 10;
+
+SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` < '1970-03-01 00:00:00' ORDER BY `__time` ASC LIMIT 10;
+
+SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` >= '1968-01-01 00:00:00' AND `__time` <= '1970-03-01 00:00:00' ORDER BY `__time` ASC LIMIT 10;
+
+SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` >= '1968-01-01 00:00:00' AND `__time` <= '1970-03-01 00:00:00'
+    AND `__time` < '2011-01-01 00:00:00' ORDER BY `__time` ASC LIMIT 10;
+
+SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` BETWEEN '1968-01-01 00:00:00' AND '1970-01-01 00:00:00' ORDER BY `__time` ASC LIMIT 10;
+
+SELECT `__time`
+FROM druid_table_test_ts
+WHERE (`__time` BETWEEN '1968-01-01 00:00:00' AND '1970-01-01 00:00:00')
+    OR (`__time` BETWEEN '1968-02-01 00:00:00' AND '1970-04-01 00:00:00') ORDER BY `__time` ASC LIMIT 10;
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_test_ts.q.out b/ql/src/test/results/clientpositive/druid/druidmini_test_ts.q.out
new file mode 100644
index 0000000000..8168f0bb86
--- /dev/null
+++ b/ql/src/test/results/clientpositive/druid/druidmini_test_ts.q.out
@@ -0,0 +1,263 @@
+PREHOOK: query: CREATE TABLE druid_table_test_ts
+STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
+TBLPROPERTIES ("druid.segment.granularity" = "HOUR", "druid.query.granularity" = "MINUTE")
+AS
+SELECT `ctimestamp1` as `__time`,
+       cstring1,
+       cstring2,
+       cdouble,
+       cfloat,
+       ctinyint,
+       csmallint,
+       cint,
+       cbigint,
+       cboolean1,
+       cboolean2
+  FROM alltypesorc where ctimestamp1 IS NOT NULL
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@druid_table_test_ts
+POSTHOOK: query: CREATE TABLE druid_table_test_ts
+STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
+TBLPROPERTIES ("druid.segment.granularity" = "HOUR", "druid.query.granularity" = "MINUTE")
+AS
+SELECT `ctimestamp1` as `__time`,
+       cstring1,
+       cstring2,
+       cdouble,
+       cfloat,
+       ctinyint,
+       csmallint,
+       cint,
+       cbigint,
+       cboolean1,
+       cboolean2
+  FROM alltypesorc where ctimestamp1 IS NOT NULL
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@druid_table_test_ts
+POSTHOOK: Lineage: druid_table_test_ts.__time SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: SELECT count(*) FROM druid_table_test_ts
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT count(*) FROM druid_table_test_ts
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+6105
+PREHOOK: query: SELECT floor_year(`__time`), SUM(cfloat), SUM(cdouble), SUM(ctinyint), SUM(csmallint),SUM(cint), SUM(cbigint)
+FROM druid_table_test_ts GROUP BY floor_year(`__time`)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT floor_year(`__time`), SUM(cfloat), SUM(cdouble), SUM(ctinyint), SUM(csmallint),SUM(cint), SUM(cbigint)
+FROM druid_table_test_ts GROUP BY floor_year(`__time`)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-01-01 08:00:00 -39590.24724686146 2.7308662809692383E7 -39967 7781089 1408069801800 10992545287
+PREHOOK: query: SELECT floor_year(`__time`), MIN(cfloat), MIN(cdouble), MIN(ctinyint), MIN(csmallint),MIN(cint), MIN(cbigint)
+FROM druid_table_test_ts GROUP BY floor_year(`__time`)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT floor_year(`__time`), MIN(cfloat), MIN(cdouble), MIN(ctinyint), MIN(csmallint),MIN(cint), MIN(cbigint)
+FROM druid_table_test_ts GROUP BY floor_year(`__time`)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-01-01 08:00:00 -1790.7781 -308691.84375 2 14255 -1073279343 -8577981133
+PREHOOK: query: SELECT floor_year(`__time`), MAX(cfloat), MAX(cdouble), MAX(ctinyint), MAX(csmallint),MAX(cint), MAX(cbigint)
+FROM druid_table_test_ts GROUP BY floor_year(`__time`)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT floor_year(`__time`), MAX(cfloat), MAX(cdouble), MAX(ctinyint), MAX(csmallint),MAX(cint), MAX(cbigint)
+FROM druid_table_test_ts GROUP BY floor_year(`__time`)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-01-01 08:00:00 769.16394 1.9565518E7 -45 -8101 1276572707 4923772860
+PREHOOK: query: SELECT cstring1, SUM(cdouble) as s FROM druid_table_test_ts GROUP BY cstring1 ORDER BY s ASC LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT cstring1, SUM(cdouble) as s FROM druid_table_test_ts GROUP BY cstring1 ORDER BY s ASC LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1cGVWH7n1QU -596096.6875
+821UdmGbkEf4j -14161.827026367188
+00iT08 0.0
+02v8WnLuYDos3Cq 0.0
+yv1js 0.0
+02VRbSC5I 0.0
+014ILGhXxNY7g02hl0Xw 0.0
+02vDyIVT752 0.0
+00PafC7v 0.0
+ytpx1RL8F2I 0.0
+PREHOOK: query: SELECT cstring2, MAX(cdouble) FROM druid_table_test_ts GROUP BY cstring2 ORDER BY cstring2 ASC LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT cstring2, MAX(cdouble) FROM druid_table_test_ts GROUP BY cstring2 ORDER BY cstring2 ASC LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+NULL 1.9565518E7
+0034fkcXMQI3 15601.0
+004J8y 0.0
+00GNm -200.0
+00GW4dnb6Wgj52 -200.0
+00PBhB1Iefgk 0.0
+00d5kr1wEB7evExG 15601.0
+00qccwt8n 0.0
+017fFeQ3Gcsa83Xj2Vo0 0.0
+01EfkvNk6mjG44uxs 0.0
+PREHOOK: query: SELECT `__time`
+FROM druid_table_test_ts ORDER BY `__time` ASC LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT `__time`
+FROM druid_table_test_ts ORDER BY `__time` ASC LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+PREHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` < '1970-03-01 00:00:00' ORDER BY `__time` ASC LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` < '1970-03-01 00:00:00' ORDER BY `__time` ASC LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+PREHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` >= '1968-01-01 00:00:00' AND `__time` <= '1970-03-01 00:00:00' ORDER BY `__time` ASC LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` >= '1968-01-01 00:00:00' AND `__time` <= '1970-03-01 00:00:00' ORDER BY `__time` ASC LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+PREHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` >= '1968-01-01 00:00:00' AND `__time` <= '1970-03-01 00:00:00'
+    AND `__time` < '2011-01-01 00:00:00' ORDER BY `__time` ASC LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` >= '1968-01-01 00:00:00' AND `__time` <= '1970-03-01 00:00:00'
+    AND `__time` < '2011-01-01 00:00:00' ORDER BY `__time` ASC LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+PREHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` BETWEEN '1968-01-01 00:00:00' AND '1970-01-01 00:00:00' ORDER BY `__time` ASC LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` BETWEEN '1968-01-01 00:00:00' AND '1970-01-01 00:00:00' ORDER BY `__time` ASC LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+PREHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE (`__time` BETWEEN '1968-01-01 00:00:00' AND '1970-01-01 00:00:00')
+    OR (`__time` BETWEEN '1968-02-01 00:00:00' AND '1970-04-01 00:00:00') ORDER BY `__time` ASC LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE (`__time` BETWEEN '1968-01-01 00:00:00' AND '1970-01-01 00:00:00')
+    OR (`__time` BETWEEN '1968-02-01 00:00:00' AND '1970-04-01 00:00:00') ORDER BY `__time` ASC LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
diff --git a/storage-api/src/java/org/apache/hadoop/hive/serde2/io/DateWritable.java b/storage-api/src/java/org/apache/hadoop/hive/serde2/io/DateWritable.java
index 3894e09a4f..6325d5d1db 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/serde2/io/DateWritable.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/serde2/io/DateWritable.java
@@ -38,7 +38,6 @@
  * YYYY-MM-DD
  *
  */
-@Deprecated
 public class DateWritable implements WritableComparable<DateWritable> {
 
   private static final long MILLIS_PER_DAY = TimeUnit.DAYS.toMillis(1);