diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/BaseVectorizedColumnReader.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/BaseVectorizedColumnReader.java index 907a9b8..beacd1b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/BaseVectorizedColumnReader.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/BaseVectorizedColumnReader.java @@ -14,10 +14,10 @@ package org.apache.hadoop.hive.ql.io.parquet.vector; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.parquet.bytes.BytesInput; import org.apache.parquet.bytes.BytesUtils; import org.apache.parquet.column.ColumnDescriptor; -import org.apache.parquet.column.Dictionary; import org.apache.parquet.column.Encoding; import org.apache.parquet.column.page.DataPage; import org.apache.parquet.column.page.DataPageV1; @@ -62,7 +62,7 @@ /** * The dictionary, if this column has dictionary encoding. */ - protected final Dictionary dictionary; + protected final ParquetDictionaryDataColumnReader dictionary; /** * If true, the current page is dictionary encoded. @@ -82,7 +82,7 @@ */ protected IntIterator repetitionLevelColumn; protected IntIterator definitionLevelColumn; - protected ValuesReader dataColumn; + protected ParquetDataColumnReader dataColumn; /** * Total values in the current page. @@ -92,22 +92,38 @@ protected final PageReader pageReader; protected final ColumnDescriptor descriptor; protected final Type type; + protected final TypeInfo hiveType; + + /** + * Used for VectorizedDummyColumnReader. + */ + public BaseVectorizedColumnReader(){ + this.pageReader = null; + this.descriptor = null; + this.type = null; + this.dictionary = null; + this.hiveType = null; + this.maxDefLevel = -1; + } public BaseVectorizedColumnReader( ColumnDescriptor descriptor, PageReader pageReader, boolean skipTimestampConversion, - Type type) throws IOException { + Type parquetType, TypeInfo hiveType) throws IOException { this.descriptor = descriptor; - this.type = type; + this.type = parquetType; this.pageReader = pageReader; this.maxDefLevel = descriptor.getMaxDefinitionLevel(); this.skipTimestampConversion = skipTimestampConversion; + this.hiveType = hiveType; DictionaryPage dictionaryPage = pageReader.readDictionaryPage(); if (dictionaryPage != null) { try { - this.dictionary = dictionaryPage.getEncoding().initDictionary(descriptor, dictionaryPage); + this.dictionary = ParquetDictionaryDataColumnReaderFactory.getDataColumnReaderByType + (parquetType.asPrimitiveType().getPrimitiveTypeName(), hiveType, dictionaryPage + .getEncoding().initDictionary(descriptor, dictionaryPage),skipTimestampConversion); this.isCurrentPageDictionaryEncoded = true; } catch (IOException e) { throw new IOException("could not decode the dictionary for " + descriptor, e); @@ -146,7 +162,8 @@ public Void visit(DataPageV2 dataPageV2) { }); } - private void initDataReader(Encoding dataEncoding, byte[] bytes, int offset, int valueCount) throws IOException { + private void initDataReader(Encoding dataEncoding, byte[] bytes, int offset, int valueCount) + throws IOException { this.pageValueCount = valueCount; this.endOfPageValueCount = valuesRead + pageValueCount; if (dataEncoding.usesDictionary()) { @@ -156,10 +173,13 @@ private void initDataReader(Encoding dataEncoding, byte[] bytes, int offset, int "could not read page in col " + descriptor + " as the dictionary was missing for encoding " + dataEncoding); } - dataColumn = dataEncoding.getDictionaryBasedValuesReader(descriptor, VALUES, dictionary); + 
dataColumn = ParquetDataColumnReaderFactory.getDataColumnReaderByType(descriptor.getType(), + hiveType, dataEncoding.getDictionaryBasedValuesReader(descriptor, VALUES, dictionary + .getDictionary()), skipTimestampConversion); this.isCurrentPageDictionaryEncoded = true; } else { - dataColumn = dataEncoding.getValuesReader(descriptor, VALUES); + dataColumn = ParquetDataColumnReaderFactory.getDataColumnReaderByType(descriptor.getType(), + hiveType, dataEncoding.getValuesReader(descriptor, VALUES), skipTimestampConversion); this.isCurrentPageDictionaryEncoded = false; } diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDataColumnReader.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDataColumnReader.java new file mode 100644 index 0000000..f0289fb --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDataColumnReader.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.io.parquet.vector; + +import java.io.IOException; +import java.sql.Timestamp; + +public interface ParquetDataColumnReader { + void initFromPage(int valueCount, byte[] page, int offset) throws IOException; + + int readValueDictionaryId(); + + long readLong(); + + long readBoolean(); + + byte[] readBytes(); + + double readDouble(); + + Timestamp readTimestamp(); +} diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDataColumnReaderFactory.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDataColumnReaderFactory.java new file mode 100644 index 0000000..4f20901 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDataColumnReaderFactory.java @@ -0,0 +1,253 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.io.parquet.vector; + +import org.apache.hadoop.hive.ql.io.parquet.timestamp.NanoTime; +import org.apache.hadoop.hive.ql.io.parquet.timestamp.NanoTimeUtils; +import org.apache.hadoop.hive.serde2.io.TimestampWritable; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.parquet.column.values.ValuesReader; +import org.apache.parquet.io.api.Binary; +import org.apache.parquet.schema.PrimitiveType; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.sql.Timestamp; + +/** + * Parquet file has self-describing schema which may differ from the user required schema (e.g. + * schema evolution). This factory is used to retrieve user required typed data via corresponding + * reader which reads the underlying data. + */ +public final class ParquetDataColumnReaderFactory { + + private ParquetDataColumnReaderFactory() { + } + + /** + * The default data column reader for existing Parquet page reader which works for both + * dictionary or non dictionary types. + */ + public static class DefaultParquetDataColumnReader implements ParquetDataColumnReader { + protected ValuesReader realReader; + protected boolean skipTimestampConversion; + + public DefaultParquetDataColumnReader(ValuesReader realReader) { + this.realReader = realReader; + this.skipTimestampConversion = false; + } + + public DefaultParquetDataColumnReader(ValuesReader realReader, boolean skipTimestampConversion) { + this.realReader = realReader; + this.skipTimestampConversion = skipTimestampConversion; + } + + public void initFromPage(int i, ByteBuffer byteBuffer, int i1) throws IOException { + realReader.initFromPage(i, byteBuffer, i1); + } + + public void initFromPage(int valueCount, byte[] page, int offset) throws IOException { + this.initFromPage(valueCount, ByteBuffer.wrap(page), offset); + } + + /** + * @return the next boolean from the page + */ + public long readBoolean() { + return realReader.readBoolean() ? 1 : 0; + } + + /** + * @return the next Binary from the page + */ + public byte[] readBytes() { + return realReader.readBytes().getBytesUnsafe(); + } + + /** + * @return the next float from the page + */ + public float readFloat() { + return realReader.readFloat(); + } + + /** + * @return the next double from the page + */ + public double readDouble() { + return realReader.readDouble(); + } + + @Override + public Timestamp readTimestamp() { + NanoTime nt = NanoTime.fromBinary(realReader.readBytes()); + return NanoTimeUtils.getTimestamp(nt, skipTimestampConversion); + } + + /** + * @return the next integer from the page + */ + public int readInteger() { + return realReader.readInteger(); + } + + /** + * @return the next long from the page + */ + public long readLong() { + return realReader.readLong(); + } + + public int readValueDictionaryId() { + return realReader.readValueDictionaryId(); + } + + public void skip() { + realReader.skip(); + } + } + + /** + * The reader who reads from the underlying int32 value value. 
The implementation is consistent with
+ * ETypeConverter EINT32_CONVERTER.
+ */
+  public static class TypesFromInt32PageReader extends DefaultParquetDataColumnReader {
+
+    public TypesFromInt32PageReader(ValuesReader realReader) {
+      super(realReader);
+    }
+
+    @Override
+    public long readLong() {
+      return realReader.readInteger();
+    }
+
+    @Override
+    public float readFloat() {
+      return realReader.readInteger();
+    }
+
+    @Override
+    public double readDouble() {
+      return realReader.readInteger();
+    }
+  }
+
+  /**
+   * The reader that reads the underlying int64 values. The implementation is consistent with
+   * ETypeConverter EINT64_CONVERTER.
+   */
+  public static class TypesFromInt64PageReader extends DefaultParquetDataColumnReader {
+
+    public TypesFromInt64PageReader(ValuesReader realReader) {
+      super(realReader);
+    }
+
+    @Override
+    public float readFloat() {
+      return realReader.readLong();
+    }
+
+    @Override
+    public double readDouble() {
+      return realReader.readLong();
+    }
+  }
+
+  /**
+   * The reader that reads the underlying float values. The implementation is consistent with
+   * ETypeConverter EFLOAT_CONVERTER.
+   */
+  public static class TypesFromFloatPageReader extends DefaultParquetDataColumnReader {
+
+    public TypesFromFloatPageReader(ValuesReader realReader) {
+      super(realReader);
+    }
+
+    @Override
+    public double readDouble() {
+      return realReader.readFloat();
+    }
+  }
+
+  /**
+   * The reader that reads the underlying int96 (timestamp) values and renders them as the
+   * string form of the corresponding Hive timestamp.
+   */
+  public static class StringFromInt96PageReader extends DefaultParquetDataColumnReader {
+
+    public StringFromInt96PageReader(ValuesReader realReader, boolean skipTimestampConversion) {
+      super(realReader, skipTimestampConversion);
+    }
+
+    @Override
+    public byte[] readBytes() {
+      return new TimestampWritable(readTimestamp()).toString().getBytes();
+    }
+  }
+
+  public static ParquetDataColumnReader getDataColumnReaderByType(
+      PrimitiveType.PrimitiveTypeName parquetType, TypeInfo hiveType,
+      ValuesReader realReader, boolean skipTimestampConversion) throws IOException {
+    switch (parquetType) {
+      case INT32:
+        return new TypesFromInt32PageReader(realReader);
+      case INT64:
+        return new TypesFromInt64PageReader(realReader);
+      case FLOAT:
+        return new TypesFromFloatPageReader(realReader);
+      case INT96:
+        return getConvertorFromInt96((PrimitiveTypeInfo) hiveType, realReader, skipTimestampConversion);
+      case BOOLEAN:
+      case BINARY:
+      case DOUBLE:
+      case FIXED_LEN_BYTE_ARRAY:
+        return new DefaultParquetDataColumnReader(realReader);
+      default:
+        throw new IOException("Invalid category " + parquetType);
+    }
+  }
+
+  private static ParquetDataColumnReader getConvertorFromInt96(PrimitiveTypeInfo hiveType,
+      ValuesReader realReader, boolean skipTimestampConversion) {
+    switch (hiveType.getPrimitiveCategory()) {
+      case STRING:
+      case CHAR:
+      case VARCHAR:
+        return new StringFromInt96PageReader(realReader, skipTimestampConversion);
+      case INT:
+      case BYTE:
+      case SHORT:
+      case DATE:
+      case INTERVAL_YEAR_MONTH:
+      case LONG:
+      case BOOLEAN:
+      case DOUBLE:
+      case BINARY:
+      case FLOAT:
+      case DECIMAL:
+      case TIMESTAMP:
+      case INTERVAL_DAY_TIME:
+        return new DefaultParquetDataColumnReader(realReader, skipTimestampConversion);
+      default:
+        throw new RuntimeException("Unsupported type conversion from Hive type: " + hiveType
+            + " to Parquet int96");
+    }
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDictionaryDataColumnReader.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDictionaryDataColumnReader.java
new file mode 100644
index 0000000..d46de26
--- /dev/null
+++
ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDictionaryDataColumnReader.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.io.parquet.vector; + +import org.apache.parquet.column.Dictionary; + +import java.sql.Timestamp; + +public interface ParquetDictionaryDataColumnReader { + Dictionary getDictionary(); + + byte[] decodeToBinary(int id); + + float decodeToFloat(int id); + + double decodeToDouble(int id); + + int decodeToInt(int id); + + long decodeToLong(int id); + + boolean decodeToBoolean(int id); + + Timestamp decodeToTimestamp(int id); +} diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDictionaryDataColumnReaderFactory.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDictionaryDataColumnReaderFactory.java new file mode 100644 index 0000000..4b177b0 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDictionaryDataColumnReaderFactory.java @@ -0,0 +1,231 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.io.parquet.vector; + +import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector; +import org.apache.hadoop.hive.ql.io.parquet.timestamp.NanoTime; +import org.apache.hadoop.hive.ql.io.parquet.timestamp.NanoTimeUtils; +import org.apache.hadoop.hive.serde2.io.TimestampWritable; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.parquet.column.Dictionary; +import org.apache.parquet.io.api.Binary; +import org.apache.parquet.schema.PrimitiveType; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.sql.Timestamp; + +/** + * Parquet file has self-describing schema which may differ from the user required schema (e.g. + * schema evolution). This factory is used to retrieve user required typed data via corresponding + * reader which reads the underlying data. 
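+ *
+ * For example, a dictionary page stored as Parquet int32 can back a Hive bigint column
+ * (assuming {@code dictionary} and {@code dictionaryId} were obtained from the page reader):
+ *
+ * <pre>
+ *   ParquetDictionaryDataColumnReader reader =
+ *       ParquetDictionaryDataColumnReaderFactory.getDataColumnReaderByType(
+ *           PrimitiveType.PrimitiveTypeName.INT32, TypeInfoFactory.longTypeInfo,
+ *           dictionary, false);
+ *   long value = reader.decodeToLong(dictionaryId);  // widens the stored int32 to long
+ * </pre>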
+ */ +public final class ParquetDictionaryDataColumnReaderFactory { + + private ParquetDictionaryDataColumnReaderFactory() { + } + + /** + * The default data column reader for existing Parquet page reader which works for both + * dictionary or non dictionary types. + */ + public static class DefaultDictionaryParquetDataColumnReader implements ParquetDictionaryDataColumnReader { + protected Dictionary realReader; + protected boolean skipTimestampConversion = false; + + public DefaultDictionaryParquetDataColumnReader(Dictionary realReader) { + this.realReader = realReader; + } + + public DefaultDictionaryParquetDataColumnReader(Dictionary realReader, boolean skipTimestampConversion) { + this.realReader = realReader; + this.skipTimestampConversion = skipTimestampConversion; + } + + public boolean readBoolean(int id) { + return realReader.decodeToBoolean(id); + } + + @Override + public Dictionary getDictionary() { + return realReader; + } + + public byte[] decodeToBinary(int id) { + return realReader.decodeToBinary(id).getBytesUnsafe(); + } + + public float decodeToFloat(int id) { + return realReader.decodeToFloat(id); + } + + public double decodeToDouble(int id) { + return realReader.decodeToDouble(id); + } + + public int decodeToInt(int id) { + return realReader.decodeToInt(id); + } + + public long decodeToLong(int id) { + return realReader.decodeToLong(id); + } + + public boolean decodeToBoolean(int id){ + return realReader.decodeToBoolean(id); + } + + @Override + public Timestamp decodeToTimestamp(int id) { + ByteBuffer buf = realReader.decodeToBinary(id).toByteBuffer(); + buf.order(ByteOrder.LITTLE_ENDIAN); + long timeOfDayNanos = buf.getLong(); + int julianDay = buf.getInt(); + NanoTime nt = new NanoTime(julianDay, timeOfDayNanos); + return NanoTimeUtils.getTimestamp(nt, skipTimestampConversion); + } + } + + /** + * The reader who reads from the underlying int32 value value. Implementation is in consist with + * ETypeConverter EINT32_CONVERTER + */ + public static class TypesFromInt32DictionaryPageReader extends DefaultDictionaryParquetDataColumnReader { + + public TypesFromInt32DictionaryPageReader(Dictionary realReader) { + super(realReader); + } + + @Override + public long decodeToLong(int id) { + return realReader.decodeToInt(id); + } + + @Override + public float decodeToFloat(int id) { + return realReader.decodeToInt(id); + } + + @Override + public double decodeToDouble(int id) { + return realReader.decodeToInt(id); + } + } + + /** + * The reader who reads from the underlying int64 value value. Implementation is in consist with + * ETypeConverter EINT64_CONVERTER + */ + public static class TypesFromInt64DictionaryPageReader extends DefaultDictionaryParquetDataColumnReader { + + public TypesFromInt64DictionaryPageReader(Dictionary realReader) { + super(realReader); + } + + @Override + public float decodeToFloat(int id) { + return realReader.decodeToLong(id); + } + + @Override + public double decodeToDouble(int id) { + return realReader.decodeToLong(id); + } + } + + /** + * The reader who reads from the underlying float value value. 
Implementation is in consist with + * ETypeConverter EFLOAT_CONVERTER + */ + public static class TypesFromFloatDictionaryPageReader extends DefaultDictionaryParquetDataColumnReader { + + public TypesFromFloatDictionaryPageReader(Dictionary realReader) { + super(realReader); + } + + @Override + public double decodeToDouble(int id) { + return realReader.decodeToFloat(id); + } + } + + public static class StringFromInt96DictionaryPageReader extends DefaultDictionaryParquetDataColumnReader { + + public StringFromInt96DictionaryPageReader(Dictionary realReader, boolean skipTimestampConversion) { + super(realReader, skipTimestampConversion); + } + + @Override + public byte[] decodeToBinary(int id) { + return decodeToTimestamp(id).toString().getBytes(); + } + } + + public static ParquetDictionaryDataColumnReader getDataColumnReaderByType( + PrimitiveType.PrimitiveTypeName parquetType, TypeInfo typeInfo, Dictionary realReader, + boolean skipTimestampConversion) throws IOException { + switch (parquetType) { + case INT32: + return new TypesFromInt32DictionaryPageReader(realReader); + case INT64: + return new TypesFromInt64DictionaryPageReader(realReader); + case FLOAT: + return new TypesFromFloatDictionaryPageReader(realReader); + case BOOLEAN: + case BINARY: + case DOUBLE: + case INT96: + return getConvertorFromInt96((PrimitiveTypeInfo) typeInfo, realReader, skipTimestampConversion); + case FIXED_LEN_BYTE_ARRAY: + return new DefaultDictionaryParquetDataColumnReader(realReader); + default: + throw new IOException("Invalid category " + parquetType); + } + } + + private static ParquetDictionaryDataColumnReader getConvertorFromInt96(PrimitiveTypeInfo hiveType, + Dictionary realReader, + boolean skipTimestampConversion) { + switch (hiveType.getPrimitiveCategory()) { + case STRING: + case CHAR: + case VARCHAR: + return new StringFromInt96DictionaryPageReader(realReader, skipTimestampConversion); + case INT: + case BYTE: + case SHORT: + case DATE: + case INTERVAL_YEAR_MONTH: + case LONG: + case BOOLEAN: + case DOUBLE: + case BINARY: + case FLOAT: + case DECIMAL: + case TIMESTAMP: + case INTERVAL_DAY_TIME: + return new DefaultDictionaryParquetDataColumnReader(realReader, skipTimestampConversion); + default: + throw new RuntimeException("Unsupported type conversion from Hive type: " + hiveType + " to " + + "Parquet int96"); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedDummyColumnReader.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedDummyColumnReader.java new file mode 100644 index 0000000..ee1d692 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedDummyColumnReader.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.io.parquet.vector; + +import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; + +import java.io.IOException; +import java.util.Arrays; + +/** + * A dummy vectorized parquet reader to support schema evolution. + */ +public class VectorizedDummyColumnReader extends BaseVectorizedColumnReader { + + public VectorizedDummyColumnReader() { + super(); + } + + @Override + public void readBatch(int total, ColumnVector column, TypeInfo columnType) throws IOException { + Arrays.fill(column.isNull, true); + column.isRepeating = true; + column.noNulls = false; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedListColumnReader.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedListColumnReader.java index c36640d..d00cee5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedListColumnReader.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedListColumnReader.java @@ -46,8 +46,8 @@ boolean isFirstRow = true; public VectorizedListColumnReader(ColumnDescriptor descriptor, PageReader pageReader, - boolean skipTimestampConversion, Type type) throws IOException { - super(descriptor, pageReader, skipTimestampConversion, type); + boolean skipTimestampConversion, Type type, TypeInfo hiveType) throws IOException { + super(descriptor, pageReader, skipTimestampConversion, type, hiveType); } @Override @@ -147,24 +147,22 @@ private Object readPrimitiveTypedRow(PrimitiveObjectInspector.PrimitiveCategory case INT: case BYTE: case SHORT: - return dataColumn.readInteger(); + case LONG: + return dataColumn.readLong(); case DATE: case INTERVAL_YEAR_MONTH: - case LONG: - return dataColumn.readLong(); case BOOLEAN: - return dataColumn.readBoolean() ? 1 : 0; - case DOUBLE: - return dataColumn.readDouble(); + return dataColumn.readBoolean(); case BINARY: case STRING: case CHAR: case VARCHAR: - return dataColumn.readBytes().getBytesUnsafe(); + return dataColumn.readBytes(); case FLOAT: - return dataColumn.readFloat(); + case DOUBLE: + return dataColumn.readDouble(); case DECIMAL: - return dataColumn.readBytes().getBytesUnsafe(); + return dataColumn.readBytes(); case INTERVAL_DAY_TIME: case TIMESTAMP: default: @@ -205,7 +203,7 @@ private List decodeDictionaryIds(List valueList) { case FIXED_LEN_BYTE_ARRAY: resultList = new ArrayList(total); for (int i = 0; i < total; ++i) { - resultList.add(dictionary.decodeToBinary(intList.get(i)).getBytesUnsafe()); + resultList.add(dictionary.decodeToBinary(intList.get(i))); } break; default: diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java index 08ac57b..651fcd5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java @@ -1,9 +1,13 @@ /* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -11,9 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.ql.io.parquet.vector; -import com.google.common.annotations.VisibleForTesting; +package org.apache.hadoop.hive.ql.io.parquet.vector; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -474,9 +477,13 @@ private VectorizedColumnReader buildVectorizedParquetReader( if (columnDescriptors == null || columnDescriptors.isEmpty()) { throw new RuntimeException( "Failed to find related Parquet column descriptor with type " + type); - } else { + } + if (fileSchema.getColumns().contains(descriptors.get(0))) { return new VectorizedPrimitiveColumnReader(descriptors.get(0), - pages.getPageReader(descriptors.get(0)), skipTimestampConversion, type); + pages.getPageReader(descriptors.get(0)), skipTimestampConversion, type, typeInfo); + } else { + // Support for schema evolution + return new VectorizedDummyColumnReader(); } case STRUCT: StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo; @@ -503,7 +510,7 @@ private VectorizedColumnReader buildVectorizedParquetReader( "Failed to find related Parquet column descriptor with type " + type); } return new VectorizedListColumnReader(descriptors.get(0), - pages.getPageReader(descriptors.get(0)), skipTimestampConversion, type); + pages.getPageReader(descriptors.get(0)), skipTimestampConversion, type, typeInfo); case MAP: if (columnDescriptors == null || columnDescriptors.isEmpty()) { throw new RuntimeException( @@ -535,10 +542,10 @@ private VectorizedColumnReader buildVectorizedParquetReader( List kvTypes = groupType.getFields(); VectorizedListColumnReader keyListColumnReader = new VectorizedListColumnReader( descriptors.get(0), pages.getPageReader(descriptors.get(0)), skipTimestampConversion, - kvTypes.get(0)); + kvTypes.get(0), typeInfo); VectorizedListColumnReader valueListColumnReader = new VectorizedListColumnReader( descriptors.get(1), pages.getPageReader(descriptors.get(1)), skipTimestampConversion, - kvTypes.get(1)); + kvTypes.get(1), typeInfo); return new VectorizedMapColumnReader(keyListColumnReader, valueListColumnReader); case UNION: default: diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedPrimitiveColumnReader.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedPrimitiveColumnReader.java index 39689f1..3e4344c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedPrimitiveColumnReader.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedPrimitiveColumnReader.java @@ -41,8 +41,8 @@ public VectorizedPrimitiveColumnReader( ColumnDescriptor descriptor, PageReader pageReader, boolean skipTimestampConversion, - Type type) throws IOException { - super(descriptor, pageReader, skipTimestampConversion, type); + Type type, TypeInfo hiveType) throws IOException { + super(descriptor, pageReader, skipTimestampConversion, type, hiveType); } @Override @@ -64,7 +64,7 @@ public void readBatch( LongColumnVector dictionaryIds = new LongColumnVector(); 
// Read and decode dictionary ids. readDictionaryIDs(num, dictionaryIds, rowId); - decodeDictionaryIds(rowId, num, column, dictionaryIds); + decodeDictionaryIds(rowId, num, column, columnType, dictionaryIds); } else { // assign values in vector readBatchHelper(num, column, columnType, rowId); @@ -148,7 +148,7 @@ private void readIntegers( while (left > 0) { readRepetitionAndDefinitionLevels(); if (definitionLevel >= maxDefLevel) { - c.vector[rowId] = dataColumn.readInteger(); + c.vector[rowId] = dataColumn.readLong(); c.isNull[rowId] = false; c.isRepeating = c.isRepeating && (c.vector[0] == c.vector[rowId]); } else { @@ -190,7 +190,7 @@ private void readBooleans( while (left > 0) { readRepetitionAndDefinitionLevels(); if (definitionLevel >= maxDefLevel) { - c.vector[rowId] = dataColumn.readBoolean() ? 1 : 0; + c.vector[rowId] = dataColumn.readBoolean(); c.isNull[rowId] = false; c.isRepeating = c.isRepeating && (c.vector[0] == c.vector[rowId]); } else { @@ -232,7 +232,7 @@ private void readFloats( while (left > 0) { readRepetitionAndDefinitionLevels(); if (definitionLevel >= maxDefLevel) { - c.vector[rowId] = dataColumn.readFloat(); + c.vector[rowId] = dataColumn.readDouble(); c.isNull[rowId] = false; c.isRepeating = c.isRepeating && (c.vector[0] == c.vector[rowId]); } else { @@ -255,7 +255,7 @@ private void readDecimal( while (left > 0) { readRepetitionAndDefinitionLevels(); if (definitionLevel >= maxDefLevel) { - c.vector[rowId].set(dataColumn.readBytes().getBytesUnsafe(), c.scale); + c.vector[rowId].set(dataColumn.readBytes(), c.scale); c.isNull[rowId] = false; c.isRepeating = c.isRepeating && (c.vector[0] == c.vector[rowId]); } else { @@ -276,7 +276,7 @@ private void readBinaries( while (left > 0) { readRepetitionAndDefinitionLevels(); if (definitionLevel >= maxDefLevel) { - c.setVal(rowId, dataColumn.readBytes().getBytesUnsafe()); + c.setVal(rowId, dataColumn.readBytes()); c.isNull[rowId] = false; // TODO figure out a better way to set repeat for Binary type c.isRepeating = false; @@ -298,9 +298,7 @@ private void readTimestamp(int total, TimestampColumnVector c, int rowId) throws switch (descriptor.getType()) { //INT64 is not yet supported case INT96: - NanoTime nt = NanoTime.fromBinary(dataColumn.readBytes()); - Timestamp ts = NanoTimeUtils.getTimestamp(nt, skipTimestampConversion); - c.set(rowId, ts); + c.set(rowId, dataColumn.readTimestamp()); break; default: throw new IOException( @@ -326,6 +324,7 @@ private void decodeDictionaryIds( int rowId, int num, ColumnVector column, + TypeInfo columnType, LongColumnVector dictionaryIds) { System.arraycopy(dictionaryIds.isNull, rowId, column.isNull, rowId, num); if (column.noNulls) { @@ -333,63 +332,72 @@ private void decodeDictionaryIds( } column.isRepeating = column.isRepeating && dictionaryIds.isRepeating; - switch (descriptor.getType()) { - case INT32: + + PrimitiveTypeInfo primitiveColumnType = (PrimitiveTypeInfo) columnType; + + switch (primitiveColumnType.getPrimitiveCategory()) { + case INT: + case BYTE: + case SHORT: for (int i = rowId; i < rowId + num; ++i) { ((LongColumnVector) column).vector[i] = - dictionary.decodeToInt((int) dictionaryIds.vector[i]); + dictionary.decodeToInt((int) dictionaryIds.vector[i]); } break; - case INT64: + case DATE: + case INTERVAL_YEAR_MONTH: + case LONG: for (int i = rowId; i < rowId + num; ++i) { ((LongColumnVector) column).vector[i] = - dictionary.decodeToLong((int) dictionaryIds.vector[i]); + dictionary.decodeToLong((int) dictionaryIds.vector[i]); } break; - case FLOAT: + case BOOLEAN: for (int i 
= rowId; i < rowId + num; ++i) { - ((DoubleColumnVector) column).vector[i] = - dictionary.decodeToFloat((int) dictionaryIds.vector[i]); + ((LongColumnVector) column).vector[i] = + dictionary.decodeToBoolean((int) dictionaryIds.vector[i]) ? 1 : 0; } break; case DOUBLE: for (int i = rowId; i < rowId + num; ++i) { ((DoubleColumnVector) column).vector[i] = - dictionary.decodeToDouble((int) dictionaryIds.vector[i]); + dictionary.decodeToDouble((int) dictionaryIds.vector[i]); } break; - case INT96: + case BINARY: + case STRING: + case CHAR: + case VARCHAR: for (int i = rowId; i < rowId + num; ++i) { - ByteBuffer buf = dictionary.decodeToBinary((int) dictionaryIds.vector[i]).toByteBuffer(); - buf.order(ByteOrder.LITTLE_ENDIAN); - long timeOfDayNanos = buf.getLong(); - int julianDay = buf.getInt(); - NanoTime nt = new NanoTime(julianDay, timeOfDayNanos); - Timestamp ts = NanoTimeUtils.getTimestamp(nt, skipTimestampConversion); - ((TimestampColumnVector) column).set(i, ts); + ((BytesColumnVector) column) + .setVal(i, dictionary.decodeToBinary((int) dictionaryIds.vector[i])); } break; - case BINARY: - case FIXED_LEN_BYTE_ARRAY: - if (column instanceof BytesColumnVector) { - for (int i = rowId; i < rowId + num; ++i) { - ((BytesColumnVector) column) - .setVal(i, dictionary.decodeToBinary((int) dictionaryIds.vector[i]).getBytesUnsafe()); - } - } else { - DecimalColumnVector decimalColumnVector = ((DecimalColumnVector) column); - decimalColumnVector.precision = + case FLOAT: + for (int i = rowId; i < rowId + num; ++i) { + ((DoubleColumnVector) column).vector[i] = + dictionary.decodeToFloat((int) dictionaryIds.vector[i]); + } + break; + case DECIMAL: + DecimalColumnVector decimalColumnVector = ((DecimalColumnVector) column); + decimalColumnVector.precision = (short) type.asPrimitiveType().getDecimalMetadata().getPrecision(); - decimalColumnVector.scale = (short) type.asPrimitiveType().getDecimalMetadata().getScale(); - for (int i = rowId; i < rowId + num; ++i) { - decimalColumnVector.vector[i] - .set(dictionary.decodeToBinary((int) dictionaryIds.vector[i]).getBytesUnsafe(), - decimalColumnVector.scale); - } + decimalColumnVector.scale = (short) type.asPrimitiveType().getDecimalMetadata().getScale(); + for (int i = rowId; i < rowId + num; ++i) { + decimalColumnVector.vector[i] + .set(dictionary.decodeToBinary((int) dictionaryIds.vector[i]), + decimalColumnVector.scale); } break; + case TIMESTAMP: + for (int i = rowId; i < rowId + num; ++i) { + ((TimestampColumnVector) column).set(i, dictionary.decodeToTimestamp((int) dictionaryIds.vector[i])); + } + break; + case INTERVAL_DAY_TIME: default: - throw new UnsupportedOperationException("Unsupported type: " + descriptor.getType()); + throw new UnsupportedOperationException("Unsupported type: " + type); } } } diff --git ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestVectorizedColumnReader.java ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestVectorizedColumnReader.java index 9e414dc..d0ddd92 100644 --- ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestVectorizedColumnReader.java +++ ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestVectorizedColumnReader.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.io.IOConstants; import org.apache.hadoop.hive.ql.io.parquet.vector.VectorizedParquetRecordReader; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.ColumnProjectionUtils; import org.apache.hadoop.mapred.FileSplit; import 
org.apache.hadoop.mapred.JobConf; @@ -55,11 +56,22 @@ public static void cleanup() throws IOException { @Test public void testIntRead() throws Exception { intRead(isDictionaryEncoding); + longReadInt(isDictionaryEncoding); + floatReadInt(isDictionaryEncoding); + doubleReadInt(isDictionaryEncoding); } @Test public void testLongRead() throws Exception { longRead(isDictionaryEncoding); + floatReadLong(isDictionaryEncoding); + doubleReadLong(isDictionaryEncoding); + } + + @Test + public void testTimestamp() throws Exception { + timestampRead(isDictionaryEncoding); + stringReadTimestamp(isDictionaryEncoding); } @Test @@ -70,6 +82,7 @@ public void testDoubleRead() throws Exception { @Test public void testFloatRead() throws Exception { floatRead(isDictionaryEncoding); + doubleReadFloat(isDictionaryEncoding); } @Test diff --git ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestVectorizedDictionaryEncodingColumnReader.java ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestVectorizedDictionaryEncodingColumnReader.java index 3e5d831..ea23371 100644 --- ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestVectorizedDictionaryEncodingColumnReader.java +++ ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestVectorizedDictionaryEncodingColumnReader.java @@ -41,11 +41,22 @@ public static void cleanup() throws IOException { @Test public void testIntRead() throws Exception { intRead(isDictionaryEncoding); + longReadInt(isDictionaryEncoding); + floatReadInt(isDictionaryEncoding); + doubleReadInt(isDictionaryEncoding); } @Test public void testLongRead() throws Exception { longRead(isDictionaryEncoding); + floatReadLong(isDictionaryEncoding); + doubleReadLong(isDictionaryEncoding); + } + + @Test + public void testTimestamp() throws Exception { + timestampRead(isDictionaryEncoding); + stringReadTimestamp(isDictionaryEncoding); } @Test @@ -56,6 +67,7 @@ public void testDoubleRead() throws Exception { @Test public void testFloatRead() throws Exception { floatRead(isDictionaryEncoding); + doubleReadFloat(isDictionaryEncoding); } @Test diff --git ql/src/test/org/apache/hadoop/hive/ql/io/parquet/VectorizedColumnReaderTestBase.java ql/src/test/org/apache/hadoop/hive/ql/io/parquet/VectorizedColumnReaderTestBase.java index 5d3ebd6..24ab5ef 100644 --- ql/src/test/org/apache/hadoop/hive/ql/io/parquet/VectorizedColumnReaderTestBase.java +++ ql/src/test/org/apache/hadoop/hive/ql/io/parquet/VectorizedColumnReaderTestBase.java @@ -14,6 +14,8 @@ package org.apache.hadoop.hive.ql.io.parquet; +import junit.framework.Assert; +import org.apache.commons.collections.ListUtils; import org.apache.commons.lang.ArrayUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -21,17 +23,12 @@ import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector; -import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; -import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector; -import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; -import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector; -import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector; -import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; -import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx; +import org.apache.hadoop.hive.ql.exec.vector.*; import org.apache.hadoop.hive.ql.io.IOConstants; import 
org.apache.hadoop.hive.ql.io.parquet.read.DataWritableReadSupport; import org.apache.hadoop.hive.ql.io.parquet.serde.ArrayWritableObjectInspector; +import org.apache.hadoop.hive.ql.io.parquet.timestamp.NanoTime; +import org.apache.hadoop.hive.ql.io.parquet.timestamp.NanoTimeUtils; import org.apache.hadoop.hive.ql.io.parquet.vector.VectorizedParquetRecordReader; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.MapWork; @@ -43,8 +40,8 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.mapred.FileSplit; -import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; import org.apache.parquet.example.data.Group; import org.apache.parquet.example.data.simple.SimpleGroupFactory; @@ -54,9 +51,15 @@ import org.apache.parquet.hadoop.example.GroupWriteSupport; import org.apache.parquet.io.api.Binary; import org.apache.parquet.schema.MessageType; + import java.io.IOException; import java.math.BigDecimal; import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import static junit.framework.Assert.assertTrue; @@ -77,84 +80,84 @@ protected final static Path file = new Path("target/test/TestParquetVectorReader/testParquetFile"); protected static final MessageType schema = parseMessageType( - "message hive_schema { " - + "required int32 int32_field; " - + "required int64 int64_field; " - + "required int96 int96_field; " - + "required double double_field; " - + "required float float_field; " - + "required boolean boolean_field; " - + "required fixed_len_byte_array(3) flba_field; " - + "optional fixed_len_byte_array(1) some_null_field; " - + "optional fixed_len_byte_array(1) all_null_field; " - + "required binary binary_field; " - + "optional binary binary_field_some_null; " - + "required binary value (DECIMAL(5,2)); " - + "required group struct_field {" - + " required int32 a;\n" - + " required double b;\n" - + "}\n" - + "optional group nested_struct_field {" - + " optional group nsf {" - + " optional int32 c;\n" - + " optional int32 d;\n" - + " }\n" - + " optional double e;\n" - + "}\n" - + "optional group struct_field_some_null {" - + " optional int32 f;\n" - + " optional double g;\n" - + "}\n" - + "optional group map_field (MAP) {\n" - + " repeated group map (MAP_KEY_VALUE) {\n" - + " required binary key;\n" - + " optional binary value;\n" - + " }\n" - + "}\n" - + "optional group array_list (LIST) {\n" - + " repeated group bag {\n" - + " optional int32 array_element;\n" - + " }\n" - + "}\n" - + "repeated int32 list_int32_field;" - + "repeated int64 list_int64_field;" - + "repeated double list_double_field;" - + "repeated float list_float_field;" - + "repeated boolean list_boolean_field;" - + "repeated fixed_len_byte_array(3) list_byte_array_field;" - + "repeated binary list_binary_field;" - + "repeated binary list_decimal_field (DECIMAL(5,2));" - + "repeated binary list_binary_field_for_repeat_test;" - + "repeated int32 list_int32_field_for_repeat_test;" - + "repeated group map_int32 (MAP_KEY_VALUE) {\n" - + " required int32 key;\n" - + " optional int32 value;\n" - + "}\n" - + "repeated group map_int64 (MAP_KEY_VALUE) {\n" - + " required int64 key;\n" - + " optional int64 value;\n" - + "}\n" - + "repeated group map_double 
(MAP_KEY_VALUE) {\n" - + " required double key;\n" - + " optional double value;\n" - + "}\n" - + "repeated group map_float (MAP_KEY_VALUE) {\n" - + " required float key;\n" - + " optional float value;\n" - + "}\n" - + "repeated group map_binary (MAP_KEY_VALUE) {\n" - + " required binary key;\n" - + " optional binary value;\n" - + "}\n" - + "repeated group map_decimal (MAP_KEY_VALUE) {\n" - + " required binary key (DECIMAL(5,2));\n" - + " optional binary value (DECIMAL(5,2));\n" - + "}\n" - + "repeated group map_int32_for_repeat_test (MAP_KEY_VALUE) {\n" - + " required int32 key;\n" - + " optional int32 value;\n" - + "}\n" - + "} "); + "message hive_schema { " + + "required int32 int32_field; " + + "required int64 int64_field; " + + "required int96 int96_field; " + + "required double double_field; " + + "required float float_field; " + + "required boolean boolean_field; " + + "required fixed_len_byte_array(3) flba_field; " + + "optional fixed_len_byte_array(1) some_null_field; " + + "optional fixed_len_byte_array(1) all_null_field; " + + "required binary binary_field; " + + "optional binary binary_field_some_null; " + + "required binary value (DECIMAL(5,2)); " + + "required group struct_field {" + + " required int32 a;\n" + + " required double b;\n" + + "}\n" + + "optional group nested_struct_field {" + + " optional group nsf {" + + " optional int32 c;\n" + + " optional int32 d;\n" + + " }\n" + + " optional double e;\n" + + "}\n" + + "optional group struct_field_some_null {" + + " optional int32 f;\n" + + " optional double g;\n" + + "}\n" + + "optional group map_field (MAP) {\n" + + " repeated group map (MAP_KEY_VALUE) {\n" + + " required binary key;\n" + + " optional binary value;\n" + + " }\n" + + "}\n" + + "optional group array_list (LIST) {\n" + + " repeated group bag {\n" + + " optional int32 array_element;\n" + + " }\n" + + "}\n" + + "repeated int32 list_int32_field;" + + "repeated int64 list_int64_field;" + + "repeated double list_double_field;" + + "repeated float list_float_field;" + + "repeated boolean list_boolean_field;" + + "repeated fixed_len_byte_array(3) list_byte_array_field;" + + "repeated binary list_binary_field;" + + "repeated binary list_decimal_field (DECIMAL(5,2));" + + "repeated binary list_binary_field_for_repeat_test;" + + "repeated int32 list_int32_field_for_repeat_test;" + + "repeated group map_int32 (MAP_KEY_VALUE) {\n" + + " required int32 key;\n" + + " optional int32 value;\n" + + "}\n" + + "repeated group map_int64 (MAP_KEY_VALUE) {\n" + + " required int64 key;\n" + + " optional int64 value;\n" + + "}\n" + + "repeated group map_double (MAP_KEY_VALUE) {\n" + + " required double key;\n" + + " optional double value;\n" + + "}\n" + + "repeated group map_float (MAP_KEY_VALUE) {\n" + + " required float key;\n" + + " optional float value;\n" + + "}\n" + + "repeated group map_binary (MAP_KEY_VALUE) {\n" + + " required binary key;\n" + + " optional binary value;\n" + + "}\n" + + "repeated group map_decimal (MAP_KEY_VALUE) {\n" + + " required binary key (DECIMAL(5,2));\n" + + " optional binary value (DECIMAL(5,2));\n" + + "}\n" + + "repeated group map_int32_for_repeat_test (MAP_KEY_VALUE) {\n" + + " required int32 key;\n" + + " optional int32 value;\n" + + "}\n" + + "} "); protected static void removeFile() throws IOException { FileSystem fs = file.getFileSystem(conf); @@ -166,56 +169,51 @@ protected static void removeFile() throws IOException { protected static ParquetWriter initWriterFromFile() throws IOException { GroupWriteSupport.setSchema(schema, conf); return 
new ParquetWriter<>( - file, - new GroupWriteSupport(), - GZIP, 1024 * 1024, 1024, 1024 * 1024, - true, false, PARQUET_1_0, conf); + file, + new GroupWriteSupport(), + GZIP, 1024 * 1024, 1024, 1024 * 1024, + true, false, PARQUET_1_0, conf); } protected static int getIntValue( - boolean isDictionaryEncoding, - int index) { + boolean isDictionaryEncoding, + int index) { return isDictionaryEncoding ? index % UNIQUE_NUM : index; } protected static double getDoubleValue( - boolean isDictionaryEncoding, - int index) { + boolean isDictionaryEncoding, + int index) { return isDictionaryEncoding ? index % UNIQUE_NUM : index; } protected static long getLongValue( - boolean isDictionaryEncoding, - int index) { + boolean isDictionaryEncoding, + int index) { return isDictionaryEncoding ? (long) 2 * index % UNIQUE_NUM : (long) 2 * index; } protected static float getFloatValue( - boolean isDictionaryEncoding, - int index) { + boolean isDictionaryEncoding, + int index) { return (float) (isDictionaryEncoding ? index % UNIQUE_NUM * 2.0 : index * 2.0); } protected static boolean getBooleanValue( - float index) { + float index) { return (index % 2 == 0); } - protected static String getTimestampStr(int index) { - String s = String.valueOf(index); - int l = 4 - s.length(); - for (int i = 0; i < l; i++) { - s = "0" + s; - } - return "99999999" + s; + protected static NanoTime getNanoTime(int index) { + return NanoTimeUtils.getNanoTime(new Timestamp(index), false); } protected static HiveDecimal getDecimal( - boolean isDictionaryEncoding, - int index) { + boolean isDictionaryEncoding, + int index) { int decimalVal = index % 100; String decimalStr = (decimalVal < 10) ? "0" + String.valueOf(decimalVal) : String.valueOf - (decimalVal); + (decimalVal); int intVal = (isDictionaryEncoding) ? index % UNIQUE_NUM : index / 100; String d = String.valueOf(intVal) + decimalStr; BigInteger bi = new BigInteger(d); @@ -224,15 +222,15 @@ protected static HiveDecimal getDecimal( } protected static Binary getTimestamp( - boolean isDictionaryEncoding, - int index) { - String s = isDictionaryEncoding ? getTimestampStr(index % UNIQUE_NUM) : getTimestampStr(index); - return Binary.fromReusedByteArray(s.getBytes()); + boolean isDictionaryEncoding, + int index) { + NanoTime s = isDictionaryEncoding ? getNanoTime(index % UNIQUE_NUM) : getNanoTime(index); + return s.toBinary(); } protected static String getStr( - boolean isDictionaryEncoding, - int index) { + boolean isDictionaryEncoding, + int index) { int binaryLen = isDictionaryEncoding ? 
index % UNIQUE_NUM : index; String v = ""; while (binaryLen > 0) { @@ -244,8 +242,8 @@ protected static String getStr( } protected static Binary getBinaryValue( - boolean isDictionaryEncoding, - int index) { + boolean isDictionaryEncoding, + int index) { return Binary.fromString(getStr(isDictionaryEncoding, index)); } @@ -254,20 +252,20 @@ protected static boolean isNull(int index) { } public static VectorizedParquetRecordReader createTestParquetReader(String schemaString, Configuration conf) - throws IOException, InterruptedException, HiveException { + throws IOException, InterruptedException, HiveException { conf.set(PARQUET_READ_SCHEMA, schemaString); HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, true); HiveConf.setVar(conf, HiveConf.ConfVars.PLAN, "//tmp"); Job vectorJob = new Job(conf, "read vector"); ParquetInputFormat.setInputPaths(vectorJob, file); initialVectorizedRowBatchCtx(conf); - return new VectorizedParquetRecordReader(getFileSplit(vectorJob),new JobConf(conf)); + return new VectorizedParquetRecordReader(getFileSplit(vectorJob), new JobConf(conf)); } protected static FileSplit getFileSplit(Job vectorJob) throws IOException, InterruptedException { ParquetInputFormat parquetInputFormat = new ParquetInputFormat(GroupReadSupport.class); InputSplit split = (InputSplit) parquetInputFormat.getSplits(vectorJob).get(0); - FileSplit fsplit = new FileSplit(file,0L,split.getLength(),split.getLocations()); + FileSplit fsplit = new FileSplit(file, 0L, split.getLength(), split.getLocations()); return fsplit; } @@ -284,13 +282,13 @@ protected static void writeData(ParquetWriter writer, boolean isDictionar boolean booleanVal = getBooleanValue(i); Binary binary = getBinaryValue(isDictionaryEncoding, i); Group group = f.newGroup() - .append("int32_field", intVal) - .append("int64_field", longVal) - .append("int96_field", timeStamp) - .append("double_field", doubleVal) - .append("float_field", floatVal) - .append("boolean_field", booleanVal) - .append("flba_field", "abc"); + .append("int32_field", intVal) + .append("int64_field", longVal) + .append("int96_field", timeStamp) + .append("double_field", doubleVal) + .append("float_field", floatVal) + .append("boolean_field", booleanVal) + .append("flba_field", "abc"); if (!isNull) { group.append("some_null_field", "x"); @@ -306,8 +304,8 @@ protected static void writeData(ParquetWriter writer, boolean isDictionar group.append("value", Binary.fromConstantByteArray(w.getInternalStorage())); group.addGroup("struct_field") - .append("a", intVal) - .append("b", doubleVal); + .append("a", intVal) + .append("b", doubleVal); Group g = group.addGroup("nested_struct_field"); @@ -358,14 +356,184 @@ private static StructObjectInspector createStructObjectInspector(Configuration c return new ArrayWritableObjectInspector((StructTypeInfo) rowTypeInfo); } - protected void intRead(boolean isDictionaryEncoding) throws InterruptedException, HiveException, IOException { - Configuration conf = new Configuration(); - conf.set(IOConstants.COLUMNS,"int32_field"); - conf.set(IOConstants.COLUMNS_TYPES,"int"); + protected void timestampRead(boolean isDictionaryEncoding) throws InterruptedException, + HiveException, IOException { + conf.set(IOConstants.COLUMNS, "int96_field"); + conf.set(IOConstants.COLUMNS_TYPES, "timestamp"); + conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); + conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); + VectorizedParquetRecordReader reader = createTestParquetReader("message test { required " + + 
"int96 int96_field;}", conf); + VectorizedRowBatch previous = reader.createValue(); + try { + int c = 0; + while (reader.next(NullWritable.get(), previous)) { + TimestampColumnVector vector = (TimestampColumnVector) previous.cols[0]; + assertTrue(vector.noNulls); + for (int i = 0; i < vector.nanos.length; i++) { + if (c == nElements) { + break; + } + Timestamp expected = isDictionaryEncoding ? new Timestamp(c % UNIQUE_NUM) : new Timestamp(c); + assertEquals("Not the same time at " + c, expected.getTime(), vector.getTime(i)); + assertEquals("Not the same nano at " + c, expected.getNanos(), vector.getNanos(i)); + assertFalse(vector.isNull[i]); + c++; + } + } + assertEquals(nElements, c); + } finally { + reader.close(); + } + } + + protected void stringReadTimestamp(boolean isDictionaryEncoding) throws InterruptedException, + HiveException, IOException { + conf.set(IOConstants.COLUMNS, "int96_field"); + conf.set(IOConstants.COLUMNS_TYPES, "string"); + conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); + conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); + VectorizedParquetRecordReader reader = createTestParquetReader("message test { required " + + "int96 int96_field;}", conf); + VectorizedRowBatch previous = reader.createValue(); + try { + int c = 0; + while (reader.next(NullWritable.get(), previous)) { + BytesColumnVector vector = (BytesColumnVector) previous.cols[0]; + assertTrue(vector.noNulls); + for (int i = 0; i < vector.vector.length; i++) { + if (c == nElements) { + break; + } + + Timestamp expected = isDictionaryEncoding ? new Timestamp(c % UNIQUE_NUM) : new Timestamp(c); + String actual = new String(Arrays.copyOfRange(vector.vector[i], vector.start[i], vector + .start[i] + vector.length[i])); + assertEquals("Not the same time at " + c, expected.toString(), actual); + + assertFalse(vector.isNull[i]); + c++; + } + } + assertEquals(nElements, c); + } finally { + reader.close(); + } + } + + protected void timestampRead(boolean isDictionaryEncoding, Configuration conf) throws + InterruptedException, + HiveException, IOException { + VectorizedParquetRecordReader reader = createTestParquetReader("message test { required " + + "int96 int96_field;}", conf); + VectorizedRowBatch previous = reader.createValue(); + try { + int c = 0; + while (reader.next(NullWritable.get(), previous)) { + TimestampColumnVector vector = (TimestampColumnVector) previous.cols[0]; + assertTrue(vector.noNulls); + for (int i = 0; i < vector.nanos.length; i++) { + if (c == nElements) { + break; + } + Timestamp actual = new Timestamp(0); + actual.setNanos(vector.nanos[i]); + actual.setTime(vector.time[i]); + assertEquals("Failed at " + c, getNanoTime(c), NanoTime.fromBinary(getTimestamp + (isDictionaryEncoding, + c))); + assertFalse(vector.isNull[i]); + c++; + } + } + assertEquals(nElements, c); + } finally { + reader.close(); + } + } + + protected void floatReadInt(boolean isDictionaryEncoding) throws InterruptedException, + HiveException, IOException { + conf.set(IOConstants.COLUMNS, "int32_field"); + conf.set(IOConstants.COLUMNS_TYPES, "float"); + conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); + conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); + VectorizedParquetRecordReader reader = createTestParquetReader("message test { required int32" + + " int32_field;}", conf); + VectorizedRowBatch previous = reader.createValue(); + try { + int c = 0; + while (reader.next(NullWritable.get(), previous)) { + DoubleColumnVector vector = (DoubleColumnVector) previous.cols[0]; 
+ assertTrue(vector.noNulls); + for (int i = 0; i < vector.vector.length; i++) { + if (c == nElements) { + break; + } + assertEquals("Failed at " + c, getIntValue(isDictionaryEncoding, c), vector.vector[i], 0); + assertFalse(vector.isNull[i]); + c++; + } + } + assertEquals(nElements, c); + } finally { + reader.close(); + } + } + + protected void doubleReadInt(boolean isDictionaryEncoding) throws InterruptedException, + HiveException, IOException { + conf.set(IOConstants.COLUMNS, "int32_field"); + conf.set(IOConstants.COLUMNS_TYPES, "double"); conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); VectorizedParquetRecordReader reader = - createTestParquetReader("message test { required int32 int32_field;}", conf); + createTestParquetReader("message test { required int32 int32_field;}", conf); + VectorizedRowBatch previous = reader.createValue(); + try { + int c = 0; + while (reader.next(NullWritable.get(), previous)) { + DoubleColumnVector vector = (DoubleColumnVector) previous.cols[0]; + assertTrue(vector.noNulls); + for (int i = 0; i < vector.vector.length; i++) { + if (c == nElements) { + break; + } + assertEquals("Failed at " + c, getIntValue(isDictionaryEncoding, c), vector.vector[i], 0); + assertFalse(vector.isNull[i]); + c++; + } + } + assertEquals(nElements, c); + } finally { + reader.close(); + } + } + + protected void longReadInt(boolean isDictionaryEncoding) throws InterruptedException, + HiveException, IOException { + Configuration c = new Configuration(); + c.set(IOConstants.COLUMNS, "int32_field"); + c.set(IOConstants.COLUMNS_TYPES, "bigint"); + c.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); + c.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); + intRead(isDictionaryEncoding, c); + } + + protected void intRead(boolean isDictionaryEncoding) throws InterruptedException, + HiveException, IOException { + Configuration c = new Configuration(); + c.set(IOConstants.COLUMNS, "int32_field"); + c.set(IOConstants.COLUMNS_TYPES, "int"); + c.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); + c.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); + intRead(isDictionaryEncoding, c); + } + + private void intRead(boolean isDictionaryEncoding, Configuration conf) throws + InterruptedException, HiveException, IOException { + VectorizedParquetRecordReader reader = + createTestParquetReader("message test { required int32 int32_field;}", conf); VectorizedRowBatch previous = reader.createValue(); try { int c = 0; @@ -373,7 +541,7 @@ protected void intRead(boolean isDictionaryEncoding) throws InterruptedException LongColumnVector vector = (LongColumnVector) previous.cols[0]; assertTrue(vector.noNulls); for (int i = 0; i < vector.vector.length; i++) { - if(c == nElements){ + if (c == nElements) { break; } assertEquals("Failed at " + c, getIntValue(isDictionaryEncoding, c), vector.vector[i]); @@ -387,14 +555,78 @@ protected void intRead(boolean isDictionaryEncoding) throws InterruptedException } } + protected void floatReadLong(boolean isDictionaryEncoding) throws Exception { + Configuration c = new Configuration(); + c.set(IOConstants.COLUMNS, "int64_field"); + c.set(IOConstants.COLUMNS_TYPES, "float"); + c.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); + c.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); + VectorizedParquetRecordReader reader = + createTestParquetReader("message test { required int64 int64_field;}", c); + VectorizedRowBatch previous = reader.createValue(); 
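+    // int64 data projected as a Hive FLOAT column: the widened values land in a
+    // DoubleColumnVector, so the assertions below compare against the original long
+    // value with a delta of 0.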
+ try { + int count = 0; + while (reader.next(NullWritable.get(), previous)) { + DoubleColumnVector vector = (DoubleColumnVector) previous.cols[0]; + assertTrue(vector.noNulls); + for (int i = 0; i < vector.vector.length; i++) { + if (count == nElements) { + break; + } + assertEquals("Failed at " + count, getLongValue(isDictionaryEncoding, count), vector + .vector[i], 0); + assertFalse(vector.isNull[i]); + count++; + } + } + assertEquals(nElements, count); + } finally { + reader.close(); + } + } + + protected void doubleReadLong(boolean isDictionaryEncoding) throws Exception { + Configuration c = new Configuration(); + c.set(IOConstants.COLUMNS, "int64_field"); + c.set(IOConstants.COLUMNS_TYPES, "double"); + c.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); + c.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); + VectorizedParquetRecordReader reader = + createTestParquetReader("message test { required int64 int64_field;}", c); + VectorizedRowBatch previous = reader.createValue(); + try { + int count = 0; + while (reader.next(NullWritable.get(), previous)) { + DoubleColumnVector vector = (DoubleColumnVector) previous.cols[0]; + assertTrue(vector.noNulls); + for (int i = 0; i < vector.vector.length; i++) { + if (count == nElements) { + break; + } + assertEquals("Failed at " + count, getLongValue(isDictionaryEncoding, count), vector + .vector[i], 0); + assertFalse(vector.isNull[i]); + count++; + } + } + assertEquals(nElements, count); + } finally { + reader.close(); + } + } + protected void longRead(boolean isDictionaryEncoding) throws Exception { - Configuration conf = new Configuration(); - conf.set(IOConstants.COLUMNS, "int64_field"); - conf.set(IOConstants.COLUMNS_TYPES, "bigint"); - conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); - conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); + Configuration c = new Configuration(); + c.set(IOConstants.COLUMNS, "int64_field"); + c.set(IOConstants.COLUMNS_TYPES, "bigint"); + c.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); + c.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); + longRead(isDictionaryEncoding, c); + } + + private void longRead(boolean isDictionaryEncoding, Configuration conf) throws Exception { VectorizedParquetRecordReader reader = - createTestParquetReader("message test { required int64 int64_field;}", conf); + createTestParquetReader("message test { required int64 int64_field;}", conf); VectorizedRowBatch previous = reader.createValue(); try { int c = 0; @@ -417,13 +649,17 @@ protected void longRead(boolean isDictionaryEncoding) throws Exception { } protected void doubleRead(boolean isDictionaryEncoding) throws Exception { - Configuration conf = new Configuration(); - conf.set(IOConstants.COLUMNS, "double_field"); - conf.set(IOConstants.COLUMNS_TYPES, "double"); - conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); - conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); + Configuration c = new Configuration(); + c.set(IOConstants.COLUMNS, "double_field"); + c.set(IOConstants.COLUMNS_TYPES, "double"); + c.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); + c.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); + doubleRead(isDictionaryEncoding, c); + } + + private void doubleRead(boolean isDictionaryEncoding, Configuration conf) throws Exception { VectorizedParquetRecordReader reader = - createTestParquetReader("message test { required double double_field;}", conf); + createTestParquetReader("message test { required double 
double_field;}", conf); VectorizedRowBatch previous = reader.createValue(); try { int c = 0; @@ -435,7 +671,7 @@ protected void doubleRead(boolean isDictionaryEncoding) throws Exception { break; } assertEquals("Failed at " + c, getDoubleValue(isDictionaryEncoding, c), vector.vector[i], - 0); + 0); assertFalse(vector.isNull[i]); c++; } @@ -447,13 +683,26 @@ protected void doubleRead(boolean isDictionaryEncoding) throws Exception { } protected void floatRead(boolean isDictionaryEncoding) throws Exception { - Configuration conf = new Configuration(); - conf.set(IOConstants.COLUMNS, "float_field"); - conf.set(IOConstants.COLUMNS_TYPES, "float"); - conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); - conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); + Configuration c = new Configuration(); + c.set(IOConstants.COLUMNS, "float_field"); + c.set(IOConstants.COLUMNS_TYPES, "float"); + c.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); + c.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); + floatRead(isDictionaryEncoding, c); + } + + protected void doubleReadFloat(boolean isDictionaryEncoding) throws Exception { + Configuration c = new Configuration(); + c.set(IOConstants.COLUMNS, "float_field"); + c.set(IOConstants.COLUMNS_TYPES, "double"); + c.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); + c.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); + floatRead(isDictionaryEncoding, c); + } + + private void floatRead(boolean isDictionaryEncoding, Configuration conf) throws Exception { VectorizedParquetRecordReader reader = - createTestParquetReader("message test { required float float_field;}", conf); + createTestParquetReader("message test { required float float_field;}", conf); VectorizedRowBatch previous = reader.createValue(); try { int c = 0; @@ -465,7 +714,7 @@ protected void floatRead(boolean isDictionaryEncoding) throws Exception { break; } assertEquals("Failed at " + c, getFloatValue(isDictionaryEncoding, c), vector.vector[i], - 0); + 0); assertFalse(vector.isNull[i]); c++; } @@ -483,7 +732,7 @@ protected void booleanRead() throws Exception { conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); VectorizedParquetRecordReader reader = - createTestParquetReader("message test { required boolean boolean_field;}", conf); + createTestParquetReader("message test { required boolean boolean_field;}", conf); VectorizedRowBatch previous = reader.createValue(); try { int c = 0; @@ -512,7 +761,7 @@ protected void binaryRead(boolean isDictionaryEncoding) throws Exception { conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); VectorizedParquetRecordReader reader = - createTestParquetReader("message test { required binary binary_field_some_null;}", conf); + createTestParquetReader("message test { required binary binary_field_some_null;}", conf); VectorizedRowBatch previous = reader.createValue(); int c = 0; try { @@ -527,7 +776,7 @@ protected void binaryRead(boolean isDictionaryEncoding) throws Exception { assertEquals("Null assert failed at " + c, isNull(c), vector.isNull[i]); if (!vector.isNull[i]) { actual = new String(ArrayUtils - .subarray(vector.vector[i], vector.start[i], vector.start[i] + vector.length[i])); + .subarray(vector.vector[i], vector.start[i], vector.start[i] + vector.length[i])); assertEquals("failed at " + c, getStr(isDictionaryEncoding, c), actual); } else { noNull = false; @@ 
-550,11 +799,11 @@ protected void structRead(boolean isDictionaryEncoding) throws Exception { conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); String schema = "message hive_schema {\n" - + "group struct_field {\n" - + " optional int32 a;\n" - + " optional double b;\n" - + "}\n" - + "}\n"; + + "group struct_field {\n" + + " optional int32 a;\n" + + " optional double b;\n" + + "}\n" + + "}\n"; VectorizedParquetRecordReader reader = createTestParquetReader(schema, conf); VectorizedRowBatch previous = reader.createValue(); int c = 0; @@ -588,13 +837,13 @@ protected void nestedStructRead0(boolean isDictionaryEncoding) throws Exception conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); String schema = "message hive_schema {\n" - + "group nested_struct_field {\n" - + " optional group nsf {\n" - + " optional int32 c;\n" - + " optional int32 d;\n" - + " }" - + "optional double e;\n" - + "}\n"; + + "group nested_struct_field {\n" + + " optional group nsf {\n" + + " optional int32 c;\n" + + " optional int32 d;\n" + + " }" + + "optional double e;\n" + + "}\n"; VectorizedParquetRecordReader reader = createTestParquetReader(schema, conf); VectorizedRowBatch previous = reader.createValue(); int c = 0; @@ -631,11 +880,11 @@ protected void nestedStructRead1(boolean isDictionaryEncoding) throws Exception conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); String schema = "message hive_schema {\n" - + "group nested_struct_field {\n" - + " optional group nsf {\n" - + " optional int32 c;\n" - + " }" - + "}\n"; + + "group nested_struct_field {\n" + + " optional group nsf {\n" + + " optional int32 c;\n" + + " }" + + "}\n"; VectorizedParquetRecordReader reader = createTestParquetReader(schema, conf); VectorizedRowBatch previous = reader.createValue(); int c = 0; @@ -668,10 +917,10 @@ protected void structReadSomeNull(boolean isDictionaryEncoding) throws Exception conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); String schema = "message hive_schema {\n" - + "group struct_field_some_null {\n" - + " optional int32 f;\n" - + " optional double g;\n" - + "}\n"; + + "group struct_field_some_null {\n" + + " optional int32 f;\n" + + " optional double g;\n" + + "}\n"; VectorizedParquetRecordReader reader = createTestParquetReader(schema, conf); VectorizedRowBatch previous = reader.createValue(); int c = 0; @@ -713,7 +962,7 @@ protected void decimalRead(boolean isDictionaryEncoding) throws Exception { conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); VectorizedParquetRecordReader reader = - createTestParquetReader("message hive_schema { required value (DECIMAL(5,2));}", conf); + createTestParquetReader("message hive_schema { required value (DECIMAL(5,2));}", conf); VectorizedRowBatch previous = reader.createValue(); try { int c = 0; @@ -725,7 +974,7 @@ protected void decimalRead(boolean isDictionaryEncoding) throws Exception { break; } assertEquals("Check failed at pos " + c, getDecimal(isDictionaryEncoding, c), - vector.vector[i].getHiveDecimal()); + vector.vector[i].getHiveDecimal()); assertFalse(vector.isNull[i]); c++; } diff --git ql/src/test/queries/clientpositive/schema_evol_par_vec_table_dictionary_encoding.q 
ql/src/test/queries/clientpositive/schema_evol_par_vec_table_dictionary_encoding.q new file mode 100644 index 0000000..6b706ab --- /dev/null +++ ql/src/test/queries/clientpositive/schema_evol_par_vec_table_dictionary_encoding.q @@ -0,0 +1,94 @@ +set hive.fetch.task.conversion=none; +set hive.vectorized.execution.enabled=true; +set parquet.enable.dictionary=true; + +drop table test_alter; +drop table test_alter2; +drop table test_alter3; + +create table test_alter (id string) stored as parquet; +insert into test_alter values ('1'), ('2'), ('3'); +select * from test_alter; + +-- add new column -> empty col values should return NULL +alter table test_alter add columns (newCol string); +select * from test_alter; + +-- insert data into new column -> New data should be returned +insert into test_alter values ('4', '100'); +select * from test_alter; + +-- remove the newly added column +-- this works in vectorized execution +alter table test_alter replace columns (id string); +select * from test_alter; + +-- add column using replace column syntax +alter table test_alter replace columns (id string, id2 string); +-- this surprisingly doesn't return the 100 added to 4th row above +select * from test_alter; +insert into test_alter values ('5', '100'); +select * from test_alter; + +-- use the same column name and datatype +alter table test_alter replace columns (id string, id2 string); +select * from test_alter; + +-- change string to char +alter table test_alter replace columns (id char(10), id2 string); +select * from test_alter; + +-- change string to varchar +alter table test_alter replace columns (id string, id2 string); +alter table test_alter replace columns (id varchar(10), id2 string); +select * from test_alter; + +-- change columntype and column name +alter table test_alter replace columns (id string, id2 string); +alter table test_alter replace columns (idv varchar(10), id2 string); +select * from test_alter; + +-- test int to long type conversion +create table test_alter2 (id int) stored as parquet; +insert into test_alter2 values (1); +alter table test_alter2 replace columns (id bigint); +select * from test_alter2; + +-- test float to double type conversion +drop table test_alter2; +create table test_alter2 (id float) stored as parquet; +insert into test_alter2 values (1.5); +alter table test_alter2 replace columns (id double); +select * from test_alter2; + +drop table test_alter2; +create table test_alter2 (ts timestamp) stored as parquet; +insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456'); +select * from test_alter2; +alter table test_alter2 replace columns (ts string); +select * from test_alter2; + +drop table test_alter2; +create table test_alter2 (ts timestamp) stored as parquet; +insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456'); +select * from test_alter2; +alter table test_alter2 replace columns (ts varchar(19)); +-- this should truncate the microseconds +select * from test_alter2; + +drop table test_alter2; +create table test_alter2 (ts timestamp) stored as parquet; +insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456'); +select * from test_alter2; +alter table test_alter2 replace columns (ts char(25)); +select * from test_alter2; + +-- test integer types upconversion +create table test_alter3 (id1 tinyint, id2 smallint, id3 int, id4 bigint) stored as parquet; 
+insert into test_alter3 values (10, 20, 30, 40); +alter table test_alter3 replace columns (id1 smallint, id2 int, id3 bigint, id4 decimal(10,4)); +-- this fails mostly due to bigint to decimal +-- select * from test_alter3; +select id1, id2, id3 from test_alter3; + + diff --git ql/src/test/queries/clientpositive/schema_evol_par_vec_table_non_dictionary_encoding.q ql/src/test/queries/clientpositive/schema_evol_par_vec_table_non_dictionary_encoding.q new file mode 100644 index 0000000..3006bd4 --- /dev/null +++ ql/src/test/queries/clientpositive/schema_evol_par_vec_table_non_dictionary_encoding.q @@ -0,0 +1,94 @@ +set hive.fetch.task.conversion=none; +set hive.vectorized.execution.enabled=true; +set parquet.enable.dictionary=false; + +drop table test_alter; +drop table test_alter2; +drop table test_alter3; + +create table test_alter (id string) stored as parquet; +insert into test_alter values ('1'), ('2'), ('3'); +select * from test_alter; + +-- add new column -> empty col values should return NULL +alter table test_alter add columns (newCol string); +select * from test_alter; + +-- insert data into new column -> New data should be returned +insert into test_alter values ('4', '100'); +select * from test_alter; + +-- remove the newly added column +-- this works in vectorized execution +alter table test_alter replace columns (id string); +select * from test_alter; + +-- add column using replace column syntax +alter table test_alter replace columns (id string, id2 string); +-- this surprisingly doesn't return the 100 added to 4th row above +select * from test_alter; +insert into test_alter values ('5', '100'); +select * from test_alter; + +-- use the same column name and datatype +alter table test_alter replace columns (id string, id2 string); +select * from test_alter; + +-- change string to char +alter table test_alter replace columns (id char(10), id2 string); +select * from test_alter; + +-- change string to varchar +alter table test_alter replace columns (id string, id2 string); +alter table test_alter replace columns (id varchar(10), id2 string); +select * from test_alter; + +-- change columntype and column name +alter table test_alter replace columns (id string, id2 string); +alter table test_alter replace columns (idv varchar(10), id2 string); +select * from test_alter; + +-- test int to long type conversion +create table test_alter2 (id int) stored as parquet; +insert into test_alter2 values (1); +alter table test_alter2 replace columns (id bigint); +select * from test_alter2; + +-- test float to double type conversion +drop table test_alter2; +create table test_alter2 (id float) stored as parquet; +insert into test_alter2 values (1.5); +alter table test_alter2 replace columns (id double); +select * from test_alter2; + +drop table test_alter2; +create table test_alter2 (ts timestamp) stored as parquet; +insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456'); +select * from test_alter2; +alter table test_alter2 replace columns (ts string); +select * from test_alter2; + +drop table test_alter2; +create table test_alter2 (ts timestamp) stored as parquet; +insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456'); +select * from test_alter2; +alter table test_alter2 replace columns (ts varchar(19)); +-- this should truncate the microseconds +select * from test_alter2; + +drop table test_alter2; +create table test_alter2 (ts timestamp) stored as 
parquet; +insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456'); +select * from test_alter2; +alter table test_alter2 replace columns (ts char(25)); +select * from test_alter2; + +-- test integer types upconversion +create table test_alter3 (id1 tinyint, id2 smallint, id3 int, id4 bigint) stored as parquet; +insert into test_alter3 values (10, 20, 30, 40); +alter table test_alter3 replace columns (id1 smallint, id2 int, id3 bigint, id4 decimal(10,4)); +-- this fails mostly due to bigint to decimal +-- select * from test_alter3; +select id1, id2, id3 from test_alter3; + + diff --git ql/src/test/results/clientpositive/schema_evol_par_vec_table.q.out ql/src/test/results/clientpositive/schema_evol_par_vec_table.q.out new file mode 100644 index 0000000..a6128b6 --- /dev/null +++ ql/src/test/results/clientpositive/schema_evol_par_vec_table.q.out @@ -0,0 +1,357 @@ +PREHOOK: query: drop table test_alter +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table test_alter +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table test_alter2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table test_alter2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table test_alter3 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table test_alter3 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table test_alter (id string) stored as parquet +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_alter +POSTHOOK: query: create table test_alter (id string) stored as parquet +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_alter +PREHOOK: query: insert into test_alter values ('1'), ('2'), ('3') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@test_alter +POSTHOOK: query: insert into test_alter values ('1'), ('2'), ('3') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@test_alter +POSTHOOK: Lineage: test_alter.id SCRIPT [] +PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +1 +2 +3 +PREHOOK: query: alter table test_alter add columns (newCol string) +PREHOOK: type: ALTERTABLE_ADDCOLS +PREHOOK: Input: default@test_alter +PREHOOK: Output: default@test_alter +POSTHOOK: query: alter table test_alter add columns (newCol string) +POSTHOOK: type: ALTERTABLE_ADDCOLS +POSTHOOK: Input: default@test_alter +POSTHOOK: Output: default@test_alter +PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +1 NULL +2 NULL +3 NULL +PREHOOK: query: insert into test_alter values ('4', '100') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@test_alter +POSTHOOK: query: insert into test_alter values ('4', '100') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@test_alter +POSTHOOK: Lineage: test_alter.id SCRIPT [] +POSTHOOK: Lineage: test_alter.newcol SCRIPT [] +PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### 
A masked pattern was here #### +POSTHOOK: query: select * from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +1 NULL +2 NULL +3 NULL +4 100 +PREHOOK: query: alter table test_alter replace columns (id string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter +PREHOOK: Output: default@test_alter +POSTHOOK: query: alter table test_alter replace columns (id string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter +POSTHOOK: Output: default@test_alter +PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +1 +2 +3 +4 +PREHOOK: query: alter table test_alter replace columns (id string, id2 string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter +PREHOOK: Output: default@test_alter +POSTHOOK: query: alter table test_alter replace columns (id string, id2 string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter +POSTHOOK: Output: default@test_alter +PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +1 NULL +2 NULL +3 NULL +4 NULL +PREHOOK: query: insert into test_alter values ('5', '100') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@test_alter +POSTHOOK: query: insert into test_alter values ('5', '100') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@test_alter +POSTHOOK: Lineage: test_alter.id SCRIPT [] +POSTHOOK: Lineage: test_alter.id2 SCRIPT [] +PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +5 100 +1 NULL +2 NULL +3 NULL +4 NULL +PREHOOK: query: alter table test_alter replace columns (id string, id2 string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter +PREHOOK: Output: default@test_alter +POSTHOOK: query: alter table test_alter replace columns (id string, id2 string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter +POSTHOOK: Output: default@test_alter +PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +5 100 +1 NULL +2 NULL +3 NULL +4 NULL +PREHOOK: query: alter table test_alter replace columns (id char(10), id2 string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter +PREHOOK: Output: default@test_alter +POSTHOOK: query: alter table test_alter replace columns (id char(10), id2 string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter +POSTHOOK: Output: default@test_alter +PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### A masked pattern was here #### +POSTHOOK: query: select * 
from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +5 100 +1 NULL +2 NULL +3 NULL +4 NULL +PREHOOK: query: alter table test_alter replace columns (id string, id2 string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter +PREHOOK: Output: default@test_alter +POSTHOOK: query: alter table test_alter replace columns (id string, id2 string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter +POSTHOOK: Output: default@test_alter +PREHOOK: query: alter table test_alter replace columns (id varchar(10), id2 string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter +PREHOOK: Output: default@test_alter +POSTHOOK: query: alter table test_alter replace columns (id varchar(10), id2 string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter +POSTHOOK: Output: default@test_alter +PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +5 100 +1 NULL +2 NULL +3 NULL +4 NULL +PREHOOK: query: alter table test_alter replace columns (id string, id2 string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter +PREHOOK: Output: default@test_alter +POSTHOOK: query: alter table test_alter replace columns (id string, id2 string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter +POSTHOOK: Output: default@test_alter +PREHOOK: query: alter table test_alter replace columns (idv varchar(10), id2 string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter +PREHOOK: Output: default@test_alter +POSTHOOK: query: alter table test_alter replace columns (idv varchar(10), id2 string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter +POSTHOOK: Output: default@test_alter +PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +NULL 100 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: create table test_alter2 (id int) stored as parquet +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: create table test_alter2 (id int) stored as parquet +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_alter2 +PREHOOK: query: insert into test_alter2 values (1) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: insert into test_alter2 values (1) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@test_alter2 +POSTHOOK: Lineage: test_alter2.id SCRIPT [] +PREHOOK: query: alter table test_alter2 replace columns (id bigint) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter2 +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: alter table test_alter2 replace columns (id bigint) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter2 +POSTHOOK: Output: default@test_alter2 +PREHOOK: query: select * from test_alter2 +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter2 
+#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter2 +#### A masked pattern was here #### +1 +PREHOOK: query: drop table test_alter2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@test_alter2 +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: drop table test_alter2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@test_alter2 +POSTHOOK: Output: default@test_alter2 +PREHOOK: query: create table test_alter2 (id float) stored as parquet +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: create table test_alter2 (id float) stored as parquet +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_alter2 +PREHOOK: query: insert into test_alter2 values (1.5) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: insert into test_alter2 values (1.5) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@test_alter2 +POSTHOOK: Lineage: test_alter2.id SCRIPT [] +PREHOOK: query: alter table test_alter2 replace columns (id double) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter2 +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: alter table test_alter2 replace columns (id double) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter2 +POSTHOOK: Output: default@test_alter2 +PREHOOK: query: select * from test_alter2 +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter2 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter2 +#### A masked pattern was here #### +1.5 +PREHOOK: query: create table test_alter3 (id1 tinyint, id2 smallint, id3 int, id4 bigint) stored as parquet +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_alter3 +POSTHOOK: query: create table test_alter3 (id1 tinyint, id2 smallint, id3 int, id4 bigint) stored as parquet +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_alter3 +PREHOOK: query: insert into test_alter3 values (10, 20, 30, 40) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@test_alter3 +POSTHOOK: query: insert into test_alter3 values (10, 20, 30, 40) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@test_alter3 +POSTHOOK: Lineage: test_alter3.id1 SCRIPT [] +POSTHOOK: Lineage: test_alter3.id2 SCRIPT [] +POSTHOOK: Lineage: test_alter3.id3 SCRIPT [] +POSTHOOK: Lineage: test_alter3.id4 SCRIPT [] +PREHOOK: query: alter table test_alter3 replace columns (id1 smallint, id2 int, id3 bigint, id4 decimal(10,4)) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter3 +PREHOOK: Output: default@test_alter3 +POSTHOOK: query: alter table test_alter3 replace columns (id1 smallint, id2 int, id3 bigint, id4 decimal(10,4)) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter3 +POSTHOOK: Output: default@test_alter3 +PREHOOK: query: select id1, id2, id3 from test_alter3 +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter3 +#### A masked pattern was here #### +POSTHOOK: query: select id1, id2, id3 from test_alter3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter3 +#### A masked pattern was here 
#### +10 20 30 diff --git ql/src/test/results/clientpositive/schema_evol_par_vec_table_dictionary_encoding.q.out ql/src/test/results/clientpositive/schema_evol_par_vec_table_dictionary_encoding.q.out new file mode 100644 index 0000000..1d2f36d --- /dev/null +++ ql/src/test/results/clientpositive/schema_evol_par_vec_table_dictionary_encoding.q.out @@ -0,0 +1,522 @@ +PREHOOK: query: drop table test_alter +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table test_alter +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table test_alter2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table test_alter2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table test_alter3 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table test_alter3 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table test_alter (id string) stored as parquet +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_alter +POSTHOOK: query: create table test_alter (id string) stored as parquet +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_alter +PREHOOK: query: insert into test_alter values ('1'), ('2'), ('3') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@test_alter +POSTHOOK: query: insert into test_alter values ('1'), ('2'), ('3') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@test_alter +POSTHOOK: Lineage: test_alter.id SCRIPT [] +PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +1 +2 +3 +PREHOOK: query: alter table test_alter add columns (newCol string) +PREHOOK: type: ALTERTABLE_ADDCOLS +PREHOOK: Input: default@test_alter +PREHOOK: Output: default@test_alter +POSTHOOK: query: alter table test_alter add columns (newCol string) +POSTHOOK: type: ALTERTABLE_ADDCOLS +POSTHOOK: Input: default@test_alter +POSTHOOK: Output: default@test_alter +PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +1 NULL +2 NULL +3 NULL +PREHOOK: query: insert into test_alter values ('4', '100') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@test_alter +POSTHOOK: query: insert into test_alter values ('4', '100') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@test_alter +POSTHOOK: Lineage: test_alter.id SCRIPT [] +POSTHOOK: Lineage: test_alter.newcol SCRIPT [] +PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +1 NULL +2 NULL +3 NULL +4 100 +PREHOOK: query: alter table test_alter replace columns (id string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter +PREHOOK: Output: default@test_alter +POSTHOOK: query: alter table test_alter replace columns (id string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter +POSTHOOK: Output: default@test_alter 
+PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +1 +2 +3 +4 +PREHOOK: query: alter table test_alter replace columns (id string, id2 string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter +PREHOOK: Output: default@test_alter +POSTHOOK: query: alter table test_alter replace columns (id string, id2 string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter +POSTHOOK: Output: default@test_alter +PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +1 NULL +2 NULL +3 NULL +4 NULL +PREHOOK: query: insert into test_alter values ('5', '100') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@test_alter +POSTHOOK: query: insert into test_alter values ('5', '100') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@test_alter +POSTHOOK: Lineage: test_alter.id SCRIPT [] +POSTHOOK: Lineage: test_alter.id2 SCRIPT [] +PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +5 100 +1 NULL +2 NULL +3 NULL +4 NULL +PREHOOK: query: alter table test_alter replace columns (id string, id2 string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter +PREHOOK: Output: default@test_alter +POSTHOOK: query: alter table test_alter replace columns (id string, id2 string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter +POSTHOOK: Output: default@test_alter +PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +5 100 +1 NULL +2 NULL +3 NULL +4 NULL +PREHOOK: query: alter table test_alter replace columns (id char(10), id2 string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter +PREHOOK: Output: default@test_alter +POSTHOOK: query: alter table test_alter replace columns (id char(10), id2 string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter +POSTHOOK: Output: default@test_alter +PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +5 100 +1 NULL +2 NULL +3 NULL +4 NULL +PREHOOK: query: alter table test_alter replace columns (id string, id2 string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter +PREHOOK: Output: default@test_alter +POSTHOOK: query: alter table test_alter replace columns (id string, id2 string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter +POSTHOOK: Output: default@test_alter +PREHOOK: query: alter table 
test_alter replace columns (id varchar(10), id2 string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter +PREHOOK: Output: default@test_alter +POSTHOOK: query: alter table test_alter replace columns (id varchar(10), id2 string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter +POSTHOOK: Output: default@test_alter +PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +5 100 +1 NULL +2 NULL +3 NULL +4 NULL +PREHOOK: query: alter table test_alter replace columns (id string, id2 string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter +PREHOOK: Output: default@test_alter +POSTHOOK: query: alter table test_alter replace columns (id string, id2 string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter +POSTHOOK: Output: default@test_alter +PREHOOK: query: alter table test_alter replace columns (idv varchar(10), id2 string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter +PREHOOK: Output: default@test_alter +POSTHOOK: query: alter table test_alter replace columns (idv varchar(10), id2 string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter +POSTHOOK: Output: default@test_alter +PREHOOK: query: select * from test_alter +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter +#### A masked pattern was here #### +NULL 100 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: create table test_alter2 (id int) stored as parquet +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: create table test_alter2 (id int) stored as parquet +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_alter2 +PREHOOK: query: insert into test_alter2 values (1) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: insert into test_alter2 values (1) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@test_alter2 +POSTHOOK: Lineage: test_alter2.id SCRIPT [] +PREHOOK: query: alter table test_alter2 replace columns (id bigint) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter2 +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: alter table test_alter2 replace columns (id bigint) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter2 +POSTHOOK: Output: default@test_alter2 +PREHOOK: query: select * from test_alter2 +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter2 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter2 +#### A masked pattern was here #### +1 +PREHOOK: query: drop table test_alter2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@test_alter2 +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: drop table test_alter2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@test_alter2 +POSTHOOK: Output: default@test_alter2 +PREHOOK: query: create table test_alter2 (id float) stored as parquet +PREHOOK: type: CREATETABLE 
+PREHOOK: Output: database:default +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: create table test_alter2 (id float) stored as parquet +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_alter2 +PREHOOK: query: insert into test_alter2 values (1.5) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: insert into test_alter2 values (1.5) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@test_alter2 +POSTHOOK: Lineage: test_alter2.id SCRIPT [] +PREHOOK: query: alter table test_alter2 replace columns (id double) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter2 +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: alter table test_alter2 replace columns (id double) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter2 +POSTHOOK: Output: default@test_alter2 +PREHOOK: query: select * from test_alter2 +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter2 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter2 +#### A masked pattern was here #### +1.5 +PREHOOK: query: drop table test_alter2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@test_alter2 +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: drop table test_alter2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@test_alter2 +POSTHOOK: Output: default@test_alter2 +PREHOOK: query: create table test_alter2 (ts timestamp) stored as parquet +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: create table test_alter2 (ts timestamp) stored as parquet +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_alter2 +PREHOOK: query: insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@test_alter2 +POSTHOOK: Lineage: test_alter2.ts SCRIPT [] +PREHOOK: query: select * from test_alter2 +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter2 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter2 +#### A masked pattern was here #### +2018-01-01 13:14:15.123456 +2018-01-02 14:15:16.123456 +2018-01-03 16:17:18.123456 +PREHOOK: query: alter table test_alter2 replace columns (ts string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter2 +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: alter table test_alter2 replace columns (ts string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter2 +POSTHOOK: Output: default@test_alter2 +PREHOOK: query: select * from test_alter2 +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter2 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter2 +#### A masked pattern was here #### +2018-01-01 13:14:15.123456 +2018-01-02 14:15:16.123456 +2018-01-03 
16:17:18.123456 +PREHOOK: query: drop table test_alter2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@test_alter2 +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: drop table test_alter2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@test_alter2 +POSTHOOK: Output: default@test_alter2 +PREHOOK: query: create table test_alter2 (ts timestamp) stored as parquet +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: create table test_alter2 (ts timestamp) stored as parquet +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_alter2 +PREHOOK: query: insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@test_alter2 +POSTHOOK: Lineage: test_alter2.ts SCRIPT [] +PREHOOK: query: select * from test_alter2 +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter2 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter2 +#### A masked pattern was here #### +2018-01-01 13:14:15.123456 +2018-01-02 14:15:16.123456 +2018-01-03 16:17:18.123456 +PREHOOK: query: alter table test_alter2 replace columns (ts varchar(19)) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter2 +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: alter table test_alter2 replace columns (ts varchar(19)) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter2 +POSTHOOK: Output: default@test_alter2 +PREHOOK: query: select * from test_alter2 +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter2 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter2 +#### A masked pattern was here #### +2018-01-01 13:14:15 +2018-01-02 14:15:16 +2018-01-03 16:17:18 +PREHOOK: query: drop table test_alter2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@test_alter2 +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: drop table test_alter2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@test_alter2 +POSTHOOK: Output: default@test_alter2 +PREHOOK: query: create table test_alter2 (ts timestamp) stored as parquet +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: create table test_alter2 (ts timestamp) stored as parquet +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_alter2 +PREHOOK: query: insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@test_alter2 +POSTHOOK: Lineage: test_alter2.ts SCRIPT [] +PREHOOK: query: select * from test_alter2 +PREHOOK: type: QUERY 
+PREHOOK: Input: default@test_alter2 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter2 +#### A masked pattern was here #### +2018-01-01 13:14:15.123456 +2018-01-02 14:15:16.123456 +2018-01-03 16:17:18.123456 +PREHOOK: query: alter table test_alter2 replace columns (ts char(25)) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter2 +PREHOOK: Output: default@test_alter2 +POSTHOOK: query: alter table test_alter2 replace columns (ts char(25)) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter2 +POSTHOOK: Output: default@test_alter2 +PREHOOK: query: select * from test_alter2 +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter2 +#### A masked pattern was here #### +POSTHOOK: query: select * from test_alter2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter2 +#### A masked pattern was here #### +2018-01-01 13:14:15.12345 +2018-01-02 14:15:16.12345 +2018-01-03 16:17:18.12345 +PREHOOK: query: create table test_alter3 (id1 tinyint, id2 smallint, id3 int, id4 bigint) stored as parquet +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_alter3 +POSTHOOK: query: create table test_alter3 (id1 tinyint, id2 smallint, id3 int, id4 bigint) stored as parquet +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_alter3 +PREHOOK: query: insert into test_alter3 values (10, 20, 30, 40) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@test_alter3 +POSTHOOK: query: insert into test_alter3 values (10, 20, 30, 40) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@test_alter3 +POSTHOOK: Lineage: test_alter3.id1 SCRIPT [] +POSTHOOK: Lineage: test_alter3.id2 SCRIPT [] +POSTHOOK: Lineage: test_alter3.id3 SCRIPT [] +POSTHOOK: Lineage: test_alter3.id4 SCRIPT [] +PREHOOK: query: alter table test_alter3 replace columns (id1 smallint, id2 int, id3 bigint, id4 decimal(10,4)) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@test_alter3 +PREHOOK: Output: default@test_alter3 +POSTHOOK: query: alter table test_alter3 replace columns (id1 smallint, id2 int, id3 bigint, id4 decimal(10,4)) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@test_alter3 +POSTHOOK: Output: default@test_alter3 +PREHOOK: query: select id1, id2, id3 from test_alter3 +PREHOOK: type: QUERY +PREHOOK: Input: default@test_alter3 +#### A masked pattern was here #### +POSTHOOK: query: select id1, id2, id3 from test_alter3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_alter3 +#### A masked pattern was here #### +10 20 30
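The conversions exercised above (int32 read as bigint, float, or double; int64 read as float or double; timestamp read as string, varchar, or char) all rely on readers that widen the decoded Parquet value to the type the Hive schema now declares, which is why the ALTER TABLE ... REPLACE COLUMNS cases in the q files keep returning correct values. As a rough illustration of that widening pattern only, and not the patch's actual classes, a minimal int32-to-long/double adapter over a Parquet ValuesReader could look like the sketch below; the class name WideningInt32Reader is hypothetical.

import java.io.IOException;

import org.apache.parquet.column.values.ValuesReader;

/**
 * Illustrative sketch: wraps a ValuesReader that decodes INT32 pages and widens
 * each value to the type the Hive schema now declares, the behaviour the
 * longReadInt()/doubleReadInt() tests above depend on.
 */
public class WideningInt32Reader {

  private final ValuesReader realReader;

  public WideningInt32Reader(ValuesReader realReader) {
    this.realReader = realReader;
  }

  public void initFromPage(int valueCount, byte[] page, int offset) throws IOException {
    // Delegate page initialization to the underlying INT32 reader.
    realReader.initFromPage(valueCount, page, offset);
  }

  /** Hive column altered to BIGINT: widen each int32 value to long. */
  public long readLong() {
    return realReader.readInteger();
  }

  /** Hive column altered to FLOAT or DOUBLE: widen each int32 value to double. */
  public double readDouble() {
    return realReader.readInteger();
  }
}

In the same spirit, the reader used for a given column is chosen from the Parquet primitive type of the file together with the Hive column type of the table, so the same data file remains readable after the declared type is widened.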