diff --git druid-handler/pom.xml druid-handler/pom.xml
index 81c744f..48b2af9 100644
--- druid-handler/pom.xml
+++ druid-handler/pom.xml
@@ -53,6 +53,18 @@
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
     </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-databind</artifactId>
+    </dependency>
@@ -119,18 +131,6 @@
       <version>${druid.version}</version>
     </dependency>
-    <dependency>
-      <groupId>com.fasterxml.jackson.core</groupId>
-      <artifactId>jackson-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.fasterxml.jackson.core</groupId>
-      <artifactId>jackson-annotations</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.fasterxml.jackson.core</groupId>
-      <artifactId>jackson-databind</artifactId>
-    </dependency>
     <dependency>
       <groupId>com.google.code.findbugs</groupId>
       <artifactId>annotations</artifactId>
@@ -216,24 +216,20 @@
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.apache.calcite</groupId>
-      <artifactId>calcite-druid</artifactId>
-      <version>${calcite.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.calcite.avatica</groupId>
-      <artifactId>avatica-core</artifactId>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-databind</artifactId>
+    </dependency>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.calcite.avatica</groupId>
-      <artifactId>avatica</artifactId>
-      <version>${avatica.version}</version>
-    </dependency>
     <dependency>
       <groupId>junit</groupId>
diff --git druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
index 3eeb0c3..05f2130 100644
--- druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
+++ druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
@@ -41,6 +41,7 @@
import io.druid.timeline.partition.PartitionChunk;
import io.druid.timeline.partition.ShardSpec;
+import org.apache.calcite.adapter.druid.LocalInterval;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -122,7 +123,8 @@
private static final Logger LOG = LoggerFactory.getLogger(DruidStorageHandlerUtils.class);
private static final String SMILE_CONTENT_TYPE = "application/x-jackson-smile";
-
+ public static final String DEFAULT_TIMESTAMP_COLUMN = "__time";
+ public static final LocalInterval DEFAULT_INTERVAL = LocalInterval.create("1900-01-01", "3000-01-01");
/**
* Mapper to use to serialize/deserialize Druid objects (JSON)
*/
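
[Editor's note, not part of the patch] The two constants added above duplicate the values that Calcite's DruidTable exposes ("__time" and the 1900-01-01/3000-01-01 interval), which is what lets the handler classes below drop their org.apache.calcite.adapter.druid.DruidTable imports. A minimal sketch of the invariant, mirroring the unit test added at the end of this patch; the class name is illustrative only:

    import org.apache.calcite.adapter.druid.DruidTable;
    import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;

    public class ConstantsInSyncSketch {
      public static void main(String[] args) {
        // Both sides must agree on Druid's reserved timestamp column name...
        assert DruidTable.DEFAULT_TIMESTAMP_COLUMN
            .equals(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN) : "__time mismatch";
        // ...and on the catch-all interval used when a query specifies no range.
        assert DruidTable.DEFAULT_INTERVAL
            .equals(DruidStorageHandlerUtils.DEFAULT_INTERVAL) : "default interval mismatch";
      }
    }
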
diff --git druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java
index 9d2ec82..8156231 100644
--- druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java
+++ druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java
@@ -41,7 +41,6 @@
import io.druid.segment.indexing.granularity.UniformGranularitySpec;
import io.druid.segment.realtime.plumber.CustomVersioningPolicy;
-import org.apache.calcite.adapter.druid.DruidTable;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -117,8 +116,8 @@
for (String name : columnNameProperty.split(",")) {
columnNames.add(name);
}
- if (!columnNames.contains(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
- throw new IllegalStateException("Timestamp column (' " + DruidTable.DEFAULT_TIMESTAMP_COLUMN +
+ if (!columnNames.contains(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN)) {
+ throw new IllegalStateException("Timestamp column (' " + DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN +
"') not specified in create table; list of columns is : " +
tableProperties.getProperty(serdeConstants.LIST_COLUMNS));
}
@@ -144,7 +143,7 @@
break;
case TIMESTAMP:
String tColumnName = columnNames.get(i);
- if (!tColumnName.equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN) && !tColumnName
+ if (!tColumnName.equals(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN) && !tColumnName
.equals(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME)) {
throw new IOException("Dimension " + tColumnName + " does not have STRING type: " +
f.getPrimitiveCategory());
@@ -165,7 +164,7 @@
}
List<AggregatorFactory> aggregatorFactories = aggregatorFactoryBuilder.build();
final InputRowParser inputRowParser = new MapInputRowParser(new TimeAndDimsParseSpec(
- new TimestampSpec(DruidTable.DEFAULT_TIMESTAMP_COLUMN, "auto", null),
+ new TimestampSpec(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, "auto", null),
new DimensionsSpec(dimensions,
Lists.newArrayList(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME), null
)
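
[Editor's note, not part of the patch] The DruidOutputFormat hunks above only change which class owns the constant; the validation itself is unchanged: the table's column list must contain Druid's reserved __time column before a row parser is built. A self-contained sketch of that check; the class name and the column-list literal are hypothetical, not taken from the patch:

    import java.util.Arrays;
    import java.util.List;

    public class TimestampColumnCheckSketch {
      public static void main(String[] args) {
        // Hypothetical value of the table's "columns" property.
        String columnNameProperty = "__time,host,visited_sum";
        List<String> columnNames = Arrays.asList(columnNameProperty.split(","));
        // Stands in for DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN.
        String timestampColumn = "__time";
        if (!columnNames.contains(timestampColumn)) {
          throw new IllegalStateException("Timestamp column ('" + timestampColumn
              + "') not specified in create table; list of columns is: " + columnNameProperty);
        }
        System.out.println("Timestamp column present: " + timestampColumn);
      }
    }
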
diff --git druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
index 2f53616..c7f0769 100644
--- druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
+++ druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
@@ -27,8 +27,6 @@
import java.util.List;
import java.util.Map;
-import org.apache.calcite.adapter.druid.DruidDateTimeUtils;
-import org.apache.calcite.adapter.druid.DruidTable;
import org.apache.commons.lang3.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
@@ -56,7 +54,6 @@
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.jboss.netty.handler.codec.http.HttpMethod;
import org.joda.time.Interval;
-import org.joda.time.Period;
import org.joda.time.chrono.ISOChronology;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -65,10 +62,6 @@
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.google.common.collect.Lists;
-import com.metamx.common.lifecycle.Lifecycle;
-import com.metamx.http.client.HttpClient;
-import com.metamx.http.client.HttpClientConfig;
-import com.metamx.http.client.HttpClientInit;
import com.metamx.http.client.Request;
import io.druid.query.BaseQuery;
@@ -308,7 +301,7 @@ private static String createSelectStarQuery(String dataSource) throws IOException
// following the Select threshold configuration property
final List<Interval> intervals = new ArrayList<>();
if (query.getIntervals().size() == 1 && query.getIntervals().get(0).withChronology(
- ISOChronology.getInstanceUTC()).equals(DruidTable.DEFAULT_INTERVAL)) {
+ ISOChronology.getInstanceUTC()).equals(DruidStorageHandlerUtils.DEFAULT_INTERVAL)) {
// Default max and min, we should execute a time boundary query to get a
// more precise range
TimeBoundaryQueryBuilder timeBuilder = new Druids.TimeBoundaryQueryBuilder();
diff --git druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidRecordWriter.java druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidRecordWriter.java
index e97f588..cf4dad6 100644
--- druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidRecordWriter.java
+++ druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidRecordWriter.java
@@ -41,7 +41,6 @@
import io.druid.segment.realtime.plumber.Committers;
import io.druid.timeline.DataSegment;
import io.druid.timeline.partition.LinearShardSpec;
-import org.apache.calcite.adapter.druid.DruidTable;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -239,7 +238,7 @@ public String apply(
@Override
public void write(Writable w) throws IOException {
DruidWritable record = (DruidWritable) w;
- final long timestamp = (long) record.getValue().get(DruidTable.DEFAULT_TIMESTAMP_COLUMN);
+ final long timestamp = (long) record.getValue().get(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN);
final long truncatedTime = (long) record.getValue()
.get(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME);
diff --git druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java
index fddabf7..b5b254a 100644
--- druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java
+++ druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java
@@ -21,7 +21,6 @@
import java.io.InputStream;
import java.util.List;
-import org.apache.calcite.adapter.druid.DruidTable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
@@ -141,7 +140,7 @@ public DruidWritable getCurrentValue() throws IOException, InterruptedException
// Create new value
DruidWritable value = new DruidWritable();
// 1) The timestamp column
- value.getValue().put(DruidTable.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
+ value.getValue().put(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
// 2) The dimension columns
for (int i = 0; i < query.getDimensions().size(); i++) {
DimensionSpec ds = query.getDimensions().get(i);
@@ -193,7 +192,7 @@ public boolean next(NullWritable key, DruidWritable value) {
// Update value
value.getValue().clear();
// 1) The timestamp column
- value.getValue().put(DruidTable.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
+ value.getValue().put(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
// 2) The dimension columns
for (int i = 0; i < query.getDimensions().size(); i++) {
DimensionSpec ds = query.getDimensions().get(i);
diff --git druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSelectQueryRecordReader.java druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSelectQueryRecordReader.java
index 8a41e91..82eec5d 100644
--- druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSelectQueryRecordReader.java
+++ druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSelectQueryRecordReader.java
@@ -22,7 +22,6 @@
import java.util.Iterator;
import java.util.List;
-import org.apache.calcite.adapter.druid.DruidTable;
import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
import org.apache.hadoop.io.NullWritable;
@@ -81,7 +80,7 @@ public DruidWritable getCurrentValue() throws IOException, InterruptedException
// Create new value
DruidWritable value = new DruidWritable();
EventHolder e = values.next();
- value.getValue().put(DruidTable.DEFAULT_TIMESTAMP_COLUMN, e.getTimestamp().getMillis());
+ value.getValue().put(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, e.getTimestamp().getMillis());
value.getValue().putAll(e.getEvent());
return value;
}
@@ -92,7 +91,7 @@ public boolean next(NullWritable key, DruidWritable value) throws IOException {
// Update value
value.getValue().clear();
EventHolder e = values.next();
- value.getValue().put(DruidTable.DEFAULT_TIMESTAMP_COLUMN, e.getTimestamp().getMillis());
+ value.getValue().put(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, e.getTimestamp().getMillis());
value.getValue().putAll(e.getEvent());
return true;
}
diff --git druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
index 8d98b3b..8750285 100644
--- druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
+++ druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
@@ -27,7 +27,6 @@
import java.util.Map.Entry;
import java.util.Properties;
-import org.apache.calcite.adapter.druid.DruidTable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.type.HiveChar;
import org.apache.hadoop.hive.common.type.HiveDecimal;
@@ -127,8 +126,8 @@ public void initialize(Configuration configuration, Properties properties) throws SerDeException
&& !org.apache.commons.lang3.StringUtils
.isEmpty(properties.getProperty(serdeConstants.LIST_COLUMN_TYPES))) {
columnNames.addAll(Utilities.getColumnNames(properties));
- if (!columnNames.contains(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
- throw new SerDeException("Timestamp column (' " + DruidTable.DEFAULT_TIMESTAMP_COLUMN +
+ if (!columnNames.contains(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN)) {
+ throw new SerDeException("Timestamp column (' " + DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN +
"') not specified in create table; list of columns is : " +
properties.getProperty(serdeConstants.LIST_COLUMNS));
}
@@ -181,7 +180,7 @@ public ObjectInspector apply(PrimitiveTypeInfo type) {
throw new SerDeException(e);
}
for (Entry<String, PrimitiveTypeInfo> columnInfo : schemaInfo.getColumns().entrySet()) {
- if (columnInfo.getKey().equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
+ if (columnInfo.getKey().equals(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN)) {
// Special handling for timestamp column
columnNames.add(columnInfo.getKey()); // field name
PrimitiveTypeInfo type = TypeInfoFactory.timestampTypeInfo; // field type
@@ -308,7 +307,7 @@ private void inferSchema(TimeseriesQuery query,
List<String> columnNames, List<PrimitiveTypeInfo> columnTypes,
Map<String, PrimitiveTypeInfo> mapColumnNamesTypes) {
// Timestamp column
- columnNames.add(DruidTable.DEFAULT_TIMESTAMP_COLUMN);
+ columnNames.add(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN);
columnTypes.add(TypeInfoFactory.timestampTypeInfo);
// Aggregator columns
for (AggregatorFactory af : query.getAggregatorSpecs()) {
@@ -336,7 +335,7 @@ private void inferSchema(TopNQuery query,
List<String> columnNames, List<PrimitiveTypeInfo> columnTypes,
Map<String, PrimitiveTypeInfo> mapColumnNamesTypes) {
// Timestamp column
- columnNames.add(DruidTable.DEFAULT_TIMESTAMP_COLUMN);
+ columnNames.add(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN);
columnTypes.add(TypeInfoFactory.timestampTypeInfo);
// Dimension column
columnNames.add(query.getDimensionSpec().getOutputName());
@@ -368,7 +367,7 @@ private void inferSchema(SelectQuery query,
String address, Map<String, PrimitiveTypeInfo> mapColumnNamesTypes)
throws SerDeException {
// Timestamp column
- columnNames.add(DruidTable.DEFAULT_TIMESTAMP_COLUMN);
+ columnNames.add(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN);
columnTypes.add(TypeInfoFactory.timestampTypeInfo);
// Dimension columns
for (DimensionSpec ds : query.getDimensions()) {
@@ -410,7 +409,7 @@ private void inferSchema(GroupByQuery query,
List<String> columnNames, List<PrimitiveTypeInfo> columnTypes,
Map<String, PrimitiveTypeInfo> mapColumnNamesTypes) {
// Timestamp column
- columnNames.add(DruidTable.DEFAULT_TIMESTAMP_COLUMN);
+ columnNames.add(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN);
columnTypes.add(TypeInfoFactory.timestampTypeInfo);
// Dimension columns
for (DimensionSpec ds : query.getDimensions()) {
diff --git druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTimeseriesQueryRecordReader.java druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTimeseriesQueryRecordReader.java
index 8c2fb10..a1c8488 100644
--- druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTimeseriesQueryRecordReader.java
+++ druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTimeseriesQueryRecordReader.java
@@ -21,7 +21,6 @@
import java.io.InputStream;
import java.util.List;
-import org.apache.calcite.adapter.druid.DruidTable;
import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
import org.apache.hadoop.io.NullWritable;
@@ -71,7 +70,7 @@ public NullWritable getCurrentKey() throws IOException, InterruptedException {
public DruidWritable getCurrentValue() throws IOException, InterruptedException {
// Create new value
DruidWritable value = new DruidWritable();
- value.getValue().put(DruidTable.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
+ value.getValue().put(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
value.getValue().putAll(current.getValue().getBaseObject());
return value;
}
@@ -81,7 +80,7 @@ public boolean next(NullWritable key, DruidWritable value) {
if (nextKeyValue()) {
// Update value
value.getValue().clear();
- value.getValue().put(DruidTable.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
+ value.getValue().put(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
value.getValue().putAll(current.getValue().getBaseObject());
return true;
}
diff --git druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTopNQueryRecordReader.java druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTopNQueryRecordReader.java
index d431925..afdf670 100644
--- druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTopNQueryRecordReader.java
+++ druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTopNQueryRecordReader.java
@@ -22,7 +22,6 @@
import java.util.Iterator;
import java.util.List;
-import org.apache.calcite.adapter.druid.DruidTable;
import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
import org.apache.hadoop.io.NullWritable;
@@ -80,7 +79,7 @@ public NullWritable getCurrentKey() throws IOException, InterruptedException {
public DruidWritable getCurrentValue() throws IOException, InterruptedException {
// Create new value
DruidWritable value = new DruidWritable();
- value.getValue().put(DruidTable.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
+ value.getValue().put(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
if (values.hasNext()) {
value.getValue().putAll(values.next().getBaseObject());
return value;
@@ -93,7 +92,7 @@ public boolean next(NullWritable key, DruidWritable value) {
if (nextKeyValue()) {
// Update value
value.getValue().clear();
- value.getValue().put(DruidTable.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
+ value.getValue().put(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
if (values.hasNext()) {
value.getValue().putAll(values.next().getBaseObject());
}
diff --git druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java
index 2aeb279..fb15830 100644
--- druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java
+++ druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java
@@ -139,7 +139,7 @@ public void testCreateSplitsIntervals() throws Exception {
private static final String TIMESERIES_QUERY =
"{ \"queryType\": \"timeseries\", "
+ " \"dataSource\": \"sample_datasource\", "
- + " \"granularity\": \"day\", "
+ + " \"granularity\": \"DAY\", "
+ " \"descending\": \"true\", "
+ " \"intervals\": [ \"2012-01-01T00:00:00.000/2012-01-03T00:00:00.000\" ]}";
private static final String TIMESERIES_QUERY_SPLIT =
@@ -149,7 +149,7 @@ public void testCreateSplitsIntervals() throws Exception {
+ "\"descending\":true,"
+ "\"virtualColumns\":[],"
+ "\"filter\":null,"
- + "\"granularity\":{\"type\":\"period\",\"period\":\"P1D\",\"timeZone\":\"America/Los_Angeles\",\"origin\":null},"
+ + "\"granularity\":\"DAY\","
+ "\"aggregations\":[],"
+ "\"postAggregations\":[],"
+ "\"context\":null}, [localhost:8082]}]";
@@ -213,7 +213,7 @@ public void testCreateSplitsIntervals() throws Exception {
+ "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2012-01-01T00:00:00.000-08:00/2012-01-03T00:00:00.000-08:00\"]},"
+ "\"virtualColumns\":[],"
+ "\"filter\":null,"
- + "\"granularity\":{\"type\":\"period\",\"period\":\"P1D\",\"timeZone\":\"America/Los_Angeles\",\"origin\":null},"
+ + "\"granularity\":\"DAY\","
+ "\"dimensions\":[{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"country\",\"outputName\":\"country\",\"outputType\":\"STRING\"},"
+ "{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"device\",\"outputName\":\"device\",\"outputType\":\"STRING\"}],"
+ "\"aggregations\":[{\"type\":\"longSum\",\"name\":\"total_usage\",\"fieldName\":\"user_count\",\"expression\":null},"
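
[Editor's note, not part of the patch] The expectations above change in two ways: the query text now writes the granularity as "DAY" (upper case), and the expected serialized splits carry the simple named granularity instead of a period-based object pinned to America/Los_Angeles. A small sketch contrasting the two JSON shapes as plain strings; the values are copied from the hunks above and the class name is illustrative:

    public class GranularityJsonShapes {
      // Old expectation: period granularity carrying an explicit time zone.
      static final String PERIOD_FORM =
          "{\"type\":\"period\",\"period\":\"P1D\",\"timeZone\":\"America/Los_Angeles\",\"origin\":null}";
      // New expectation: the named (simple) granularity.
      static final String NAMED_FORM = "\"DAY\"";

      public static void main(String[] args) {
        System.out.println("old: \"granularity\":" + PERIOD_FORM);
        System.out.println("new: \"granularity\":" + NAMED_FORM);
      }
    }
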
diff --git druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java
index 4962e0b..b16de63 100644
--- druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java
+++ druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java
@@ -87,21 +87,21 @@
final List<ImmutableMap<String, Object>> expectedRows = ImmutableList.of(
ImmutableMap.of(
- DruidTable.DEFAULT_TIMESTAMP_COLUMN,
+ DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN,
DateTime.parse("2014-10-22T00:00:00.000Z").getMillis(),
"host", ImmutableList.of("a.example.com"),
"visited_sum", 190L,
"unique_hosts", 1.0d
),
ImmutableMap.of(
- DruidTable.DEFAULT_TIMESTAMP_COLUMN,
+ DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN,
DateTime.parse("2014-10-22T01:00:00.000Z").getMillis(),
"host", ImmutableList.of("b.example.com"),
"visited_sum", 175L,
"unique_hosts", 1.0d
),
ImmutableMap.of(
- DruidTable.DEFAULT_TIMESTAMP_COLUMN,
+ DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN,
DateTime.parse("2014-10-22T02:00:00.000Z").getMillis(),
"host", ImmutableList.of("c.example.com"),
"visited_sum", 270L,
@@ -109,6 +109,16 @@
)
);
+
+ @Test
+ public void testTimeStampColumnName() {
+ Assert.assertEquals("Timestamp column name needs to match to ensure serialization/deserialization compatibility",
+ DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, DruidTable.DEFAULT_TIMESTAMP_COLUMN
+ );
+ Assert.assertEquals("Default intervals need to match",
+ DruidStorageHandlerUtils.DEFAULT_INTERVAL, DruidTable.DEFAULT_INTERVAL
+ );
+ }
// This test fails due to conflict of guava classes with hive-exec jar.
@Ignore
@Test
@@ -120,7 +130,7 @@ public void testWrite() throws IOException, SegmentLoadingException {
Configuration config = new Configuration();
final InputRowParser inputRowParser = new MapInputRowParser(new TimeAndDimsParseSpec(
- new TimestampSpec(DruidTable.DEFAULT_TIMESTAMP_COLUMN, "auto", null),
+ new TimestampSpec(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, "auto", null),
new DimensionsSpec(ImmutableList.of(new StringDimensionSchema("host")),
null, null
)
@@ -169,7 +179,7 @@ public DruidWritable apply(@Nullable ImmutableMap<String, Object> input
.put(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME,
Granularities.DAY.bucketStart(
new DateTime((long) input
- .get(DruidTable.DEFAULT_TIMESTAMP_COLUMN)))
+ .get(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN)))
.getMillis()
).build());
}
@@ -217,7 +227,7 @@ private void verifyRows(List<ImmutableMap<String, Object>> expectedRows,
Assert.assertEquals(ImmutableList.of("host"), actual.getDimensions());
- Assert.assertEquals(expected.get(DruidTable.DEFAULT_TIMESTAMP_COLUMN),
+ Assert.assertEquals(expected.get(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN),
actual.getTimestamp().getMillis()
);
Assert.assertEquals(expected.get("host"), actual.getDimension("host"));