diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAssignRow.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAssignRow.java
index e96619cf86..6864b87220 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAssignRow.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAssignRow.java
@@ -70,6 +70,7 @@
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
@@ -210,30 +211,10 @@ public void init(StructObjectInspector structObjectInspector, List<Integer> proj
   }
 
   /*
-   * Initialize using an StructObjectInspector.
-   * No projection -- the column range 0 .. fields.size()-1
-   */
-  public void init(StructObjectInspector structObjectInspector) throws HiveException {
-
-    final List<? extends StructField> fields = structObjectInspector.getAllStructFieldRefs();
-    final int count = fields.size();
-    allocateArrays(count);
-
-    for (int i = 0; i < count; i++) {
-
-      final StructField field = fields.get(i);
-      final ObjectInspector fieldInspector = field.getFieldObjectInspector();
-      final TypeInfo typeInfo =
-          TypeInfoUtils.getTypeInfoFromTypeString(fieldInspector.getTypeName());
-
-      initTargetEntry(i, i, typeInfo);
-    }
-  }
-
-  /*
    * Initialize using target data type names.
    * No projection -- the column range 0 .. types.size()-1
    */
+  @VisibleForTesting
   public void init(List<String> typeNames) throws HiveException {
 
     final int count = typeNames.size();
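Note on the VectorAssignRow change above: the ObjectInspector-based init overload is removed, and the type-name overload that remains is now marked test-only. A minimal sketch of a test-side caller (not part of the patch; the column type names here are hypothetical):

    // Hedged sketch: exercising the remaining @VisibleForTesting overload.
    // Type names are parsed the same way as Hive type strings.
    VectorAssignRow assigner = new VectorAssignRow();
    assigner.init(Arrays.asList("int", "varchar(3)", "float"));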
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapOperator.java
index 6f1346d88a..c23a605040 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapOperator.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.exec.vector;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -36,6 +37,7 @@
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
 import org.apache.hadoop.hive.ql.io.orc.OrcStruct;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -43,6 +45,7 @@
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
+import org.apache.hadoop.hive.ql.plan.VectorPartitionConversion;
 import org.apache.hadoop.hive.ql.plan.VectorPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.VectorPartitionDesc.VectorMapOperatorReadType;
 import org.apache.hadoop.hive.ql.plan.api.OperatorType;
@@ -60,11 +63,21 @@
 import org.apache.hadoop.hive.serde2.objectinspector.StandardStructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
+import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 import org.apache.hadoop.io.BinaryComparable;
 import org.apache.hadoop.io.Writable;
+import org.apache.orc.TypeDescription;
+import org.apache.orc.impl.ConvertTreeReaderFactory;
+import org.apache.orc.impl.ConvertTreeReaderFactory.ConvertTreeReader;
+import org.apache.orc.impl.SchemaEvolution;
+import org.apache.orc.impl.TreeReaderFactory;
+import org.apache.orc.impl.TreeReaderFactory.Context;
+import org.apache.orc.impl.TreeReaderFactory.TreeReader;
 
 import com.google.common.base.Preconditions;
 
@@ -154,6 +167,9 @@
   // When we are doing row deserialization, these are the regular deserializer,
   // partition object inspector, and vector row assigner.
 
+  private ConvertTreeReader[] convertTreeReaders;
+  private transient boolean isWarnLogged = false;
+
   /*
    * The abstract context for the 3 kinds of vectorized reading.
    */
@@ -206,6 +222,8 @@ public abstract void init(Configuration hconf)
    */
   protected class VectorizedInputFileFormatPartitionContext extends VectorPartitionContext {
 
+    private ConvertTreeReader[] convertTreeReaders;
+
     private VectorizedInputFileFormatPartitionContext(PartitionDesc partDesc) {
       super(partDesc);
     }
@@ -217,6 +235,10 @@ public void init(Configuration hconf) {
     public int getReaderDataColumnCount() {
       throw new RuntimeException("Not applicable");
     }
+
+    public void setConvertTreeReaders(ConvertTreeReader[] ctrs) {
+      this.convertTreeReaders = ctrs;
+    }
   }
 
   /*
@@ -402,9 +424,16 @@ public VectorPartitionContext createAndInitPartitionContext(PartitionDesc partDe
       Preconditions.checkState(Utilities.isSchemaEvolutionEnabled(hconf, isAcid));
     }
 
+    LOG.debug("Processing in {} mode", vectorMapOperatorReadType);
    switch (vectorMapOperatorReadType) {
     case VECTORIZED_INPUT_FILE_FORMAT:
-      vectorPartitionContext = new VectorizedInputFileFormatPartitionContext(partDesc);
+      VectorizedInputFileFormatPartitionContext vipc;
+      vectorPartitionContext = vipc = new VectorizedInputFileFormatPartitionContext(partDesc);
+      if (OrcSerde.class.getCanonicalName().equals(partDesc.getSerdeClassName())) {
+        // TODO# check if LLAP IO is being used and skip the below if not.
+      }
+      initializeVrbConversion(vectorPartDesc, vipc);
+
       break;
 
     case VECTOR_DESERIALIZE:
@@ -425,6 +454,60 @@ public VectorPartitionContext createAndInitPartitionContext(PartitionDesc partDe
     return vectorPartitionContext;
   }
 
+  public void initializeVrbConversion(VectorPartitionDesc vectorPartDesc,
+      VectorizedInputFileFormatPartitionContext vipc) throws IOException {
+    TypeInfo[] sourceTypeInfos = vectorPartDesc.getDataTypeInfos(),
+        targetTypeInfos = tableRowTypeInfos;
+    Preconditions.checkState(targetTypeInfos.length >= sourceTypeInfos.length);
+    final int columnCount = sourceTypeInfos.length;
+    TypeDescription sdesc = TypeDescription.createStruct();
+    TypeDescription tdesc = TypeDescription.createStruct();
+    boolean hasConversion = false;
+    for (int i = 0; i < columnCount; i++) {
+      TypeInfo tt = targetTypeInfos[i], st = sourceTypeInfos[i];
+      LOG.debug("Looking at {} and {} for compatibility", tt, st);
+      boolean isConversion = (!st.equals(tt)
+          && !VectorPartitionConversion.isImplicitVectorColumnConversion(st, tt));
+      hasConversion = hasConversion || isConversion;
+      sdesc.addField("c" + i, OrcInputFormat.convertTypeInfo(st));
+      tdesc.addField("c" + i, OrcInputFormat.convertTypeInfo(tt));
+    }
+    if (hasConversion) {
+      // Note: this assumes ID assignment will be deterministic. Trigger ID generation.
+      sdesc.getId();
+      tdesc.getId();
+      // We don't pass the includes in. It expects ORC includes incl. the struct, but what we
+      // have here are logical includes based on table schema.
+      // TODO: Sort out later via Includes class? It's not really needed for SE class.
+      final SchemaEvolution se = new SchemaEvolution(sdesc, tdesc, (boolean[]) null);
+      ConvertTreeReader[] ctrs = new ConvertTreeReader[columnCount];
+      Context context = new Context() {
+        public boolean isSkipCorrupt() {
+          return false;
+        }
+        public String getWriterTimezone() {
+          return null;
+        }
+        public SchemaEvolution getSchemaEvolution() {
+          return se;
+        }
+      };
+      for (int i = 0; i < columnCount; i++) {
+        if (dataColumnsToIncludeTruncated != null
+            && (dataColumnsToIncludeTruncated.length <= i || !dataColumnsToIncludeTruncated[i])) {
+          continue;
+        }
+        TypeInfo st = sourceTypeInfos[i], tt = targetTypeInfos[i];
+        if (st.equals(tt)) continue;
+        if (VectorPartitionConversion.isImplicitVectorColumnConversion(st, tt)) continue;
+        ctrs[i] = (ConvertTreeReader) ConvertTreeReaderFactory.createConvertTreeReader(
+            tdesc.getChildren().get(i), context);
+      }
+      LOG.info("Conversion readers for VRB mode are " + Arrays.toString(ctrs));
+      vipc.setConvertTreeReaders(ctrs);
+    }
+  }
+
   private void determineDataColumnsToIncludeTruncated() {
 
     Preconditions.checkState(batchContext != null);
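Note on initializeVrbConversion above: for the vsp test case added below (partition written as varchar(50)/float, table altered to varchar(3)/varchar(3)), the two synthetic structs it builds would pair up roughly as in this hedged sketch. It uses the same ORC TypeDescription/SchemaEvolution calls as the method itself; the c0/c1 field names mirror its "c" + i naming:

    // Sketch: source (partition) schema vs. target (table) schema for table vsp.
    TypeDescription sdesc = TypeDescription.createStruct()
        .addField("c0", TypeDescription.createVarchar().withMaxLength(50))
        .addField("c1", TypeDescription.createFloat());
    TypeDescription tdesc = TypeDescription.createStruct()
        .addField("c0", TypeDescription.createVarchar().withMaxLength(3))
        .addField("c1", TypeDescription.createVarchar().withMaxLength(3));
    // As in the patch, no ORC-level includes are passed to SchemaEvolution.
    SchemaEvolution se = new SchemaEvolution(sdesc, tdesc, (boolean[]) null);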
@@ -650,6 +733,7 @@ private void setupPartitionContextVars(String nominalPath) throws HiveException
     PartitionDesc partDesc = currentVectorPartContext.getPartDesc();
     VectorPartitionDesc vectorPartDesc = partDesc.getVectorPartitionDesc();
+
     currentReadType = vectorPartDesc.getVectorMapOperatorReadType();
 
     /*
@@ -680,6 +764,9 @@ private void setupPartitionContextVars(String nominalPath) throws HiveException
       currentPartDeserializer = null;
       currentPartRawRowObjectInspector = null;
       currentVectorAssign = null;
+      VectorizedInputFileFormatPartitionContext ctx =
+          (VectorizedInputFileFormatPartitionContext) currentVectorPartContext;
+      convertTreeReaders = ctx.convertTreeReaders;
 
     } else {
 
@@ -809,6 +896,24 @@ private boolean deliverVectorizedRowBatch(Writable value) throws HiveException {
           batch.cols[rowIdentifierColumnNum] = batchContext.getRecordIdColumnVector();
         }
       }
+      if (convertTreeReaders != null) {
+        for (int i = 0; i < convertTreeReaders.length; ++i) {
+          ConvertTreeReader ctr = convertTreeReaders[i];
+          if (ctr == null) continue;
+          // TODO# there are two problems here:
+          // 1) For some type conversions, e.g. float to string, the type of the vector that we
+          //    need after the conversion does not exist in the batch and needs to be created.
+          // 2) Some ConvertTreeReader-s implement the requirements for convertVector; however,
+          //    others do not, and call nextVector on an internal reader that needs to be
+          //    initialized via bypass methods like startStripe. We cannot access that reader
+          //    without huge visibility hacks; all these readers are protected or package
+          //    visible, so we could reset them to dummy instances, since we don't need them to do anything.
+          // Overall the interface is very roundabout w.r.t. cross-dependencies, and it's not
+          // clear why it lives in ORC when it's not specific to the ORC format (all the
+          // converters for the non-VRB modes are in Hive and not format specific).
+          // So after fixing 1) and 2) we can either call convertVector or nextVector here.
+        }
+      }
     }
     oneRootOperator.process(value, 0);
     if (oneRootOperator.getDone()) {
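Note on the TODO# above: once problems 1) and 2) are resolved, the per-column conversion would plausibly look like the sketch below. The convertVector(source, target, size) shape matches the TODO's own reference to convertVector, but treat the exact signature as an assumption; allocateTargetVector is a hypothetical helper standing in for problem 1), i.e. creating the post-conversion vector type that is missing from the batch:

    // Hedged sketch of the eventual per-column conversion call.
    ColumnVector source = batch.cols[i];
    ColumnVector target = allocateTargetVector(i); // hypothetical helper, see problem 1)
    ctr.convertVector(source, target, batch.size); // assumed signature
    batch.cols[i] = target;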
@@ -850,11 +955,15 @@ public void process(Writable value) throws HiveException {
       }
     } else if (value instanceof VectorizedRowBatch) {
-
       /*
        * This case can happen with LLAP. If it is able to deserialize and cache data from the
        * input format, it will deliver that cached data to us as VRBs.
        */
+      if (!isWarnLogged) {
+        // TODO: this should have been handled by findSerDeForLlapSerDeIf.
+        LOG.warn("Received VRB despite not being in VRB mode; this is a bug");
+        isWarnLogged = true;
+      }
 
       /*
        * Clear out any rows we may have processed in row-mode for the current partition.
        */
@@ -880,7 +989,7 @@ public void process(Writable value) throws HiveException {
           currentReadType == VectorMapOperatorReadType.VECTOR_DESERIALIZE ||
               currentReadType == VectorMapOperatorReadType.ROW_DESERIALIZE);
 
-      if (deserializerBatch.size == deserializerBatch.DEFAULT_SIZE) {
+      if (deserializerBatch.size == VectorizedRowBatch.DEFAULT_SIZE) {
 
         numRows += deserializerBatch.size;
 
         /*
diff --git ql/src/test/queries/clientpositive/schema_evolution_nonorc.q ql/src/test/queries/clientpositive/schema_evolution_nonorc.q
new file mode 100644
index 0000000000..dba7f4f6a3
--- /dev/null
+++ ql/src/test/queries/clientpositive/schema_evolution_nonorc.q
@@ -0,0 +1,53 @@
+set hive.explain.user=false;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+SET hive.vectorized.execution.enabled=true;
+SET hive.vectorized.use.vectorized.input.format=true;
+SET hive.vectorized.use.vector.serde.deserialize=true;
+SET hive.vectorized.use.row.serde.deserialize=true;
+
+SET hive.exec.schema.evolution=true;
+set hive.metastore.disallow.incompatible.col.type.changes=false;
+
+set hive.llap.io.encode.enabled=true;
+SET hive.llap.io.enabled=false;
+
+
+CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 varchar(50), string2 varchar(50), date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+row format delimited fields terminated by '|' stored as textfile;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data;
+
+
+drop table if exists vsp;
+create table vsp(vs varchar(50), i float) partitioned by(s varchar(50)) stored as textfile;
+insert into table vsp partition(s='positive') select string1,float1 from schema_evolution_data;
+alter table vsp change column vs vs varchar(3);
+alter table vsp change column i i varchar(3);
+
+drop table if exists vsp_orc;
+create table vsp_orc(vs varchar(50), i float) partitioned by(s varchar(50)) stored as orc;
+insert into table vsp_orc partition(s='positive') select string1,float1 from schema_evolution_data;
+alter table vsp_orc change column vs vs varchar(3);
+alter table vsp_orc change column i i varchar(3);
+
+drop table if exists vsp_parquet;
+create table vsp_parquet(vs varchar(50), i float) partitioned by(s varchar(50)) stored as parquet;
+insert into table vsp_parquet partition(s='positive') select string1,float1 from schema_evolution_data;
+alter table vsp_parquet change column vs vs varchar(3);
+alter table vsp_parquet change column i i varchar(3);
+
+
+
+SET hive.llap.io.enabled=false;
+select length(vs), length(i), i from vsp_parquet;
+select length(vs), length(i), i from vsp;
+select length(vs), length(i), i from vsp_orc;
+
+
+SET hive.llap.io.enabled=true;
+
+-- TODO: add i, length(i): ClassCastException
+
+select length(vs) from vsp vsp_llap;
+select length(vs) from vsp_orc vsp_orc_llap;
diff --git ql/src/test/results/clientpositive/llap/schema_evolution_nonorc.q.out ql/src/test/results/clientpositive/llap/schema_evolution_nonorc.q.out
new file mode 100644
index 0000000000..fe76e99d8a
--- /dev/null
+++ ql/src/test/results/clientpositive/llap/schema_evolution_nonorc.q.out
@@ -0,0 +1,207 @@
+PREHOOK: query: CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 varchar(50), string2 varchar(50), date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+row format delimited fields terminated by '|' stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@schema_evolution_data
+POSTHOOK: query: CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 varchar(50), string2 varchar(50), date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+row format delimited fields terminated by '|' stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@schema_evolution_data
+PREHOOK: query: load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@schema_evolution_data
+POSTHOOK: query: load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@schema_evolution_data
+PREHOOK: query: drop table if exists vsp
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists vsp
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table vsp(vs varchar(50), i float) partitioned by(s varchar(50)) stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vsp
+POSTHOOK: query: create table vsp(vs varchar(50), i float) partitioned by(s varchar(50)) stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vsp
+PREHOOK: query: insert into table vsp partition(s='positive') select string1,float1 from schema_evolution_data
+PREHOOK: type: QUERY
+PREHOOK: Input: default@schema_evolution_data
+PREHOOK: Output: default@vsp@s=positive
+POSTHOOK: query: insert into table vsp partition(s='positive') select string1,float1 from schema_evolution_data
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@schema_evolution_data
+POSTHOOK: Output: default@vsp@s=positive
+POSTHOOK: Lineage: vsp PARTITION(s=positive).i SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ]
+POSTHOOK: Lineage: vsp PARTITION(s=positive).vs SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string1, type:varchar(50), comment:null), ]
+PREHOOK: query: alter table vsp change column vs vs varchar(3)
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@vsp
+PREHOOK: Output: default@vsp
+POSTHOOK: query: alter table vsp change column vs vs varchar(3)
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@vsp
+POSTHOOK: Output: default@vsp
+PREHOOK: query: alter table vsp change column i i varchar(3)
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@vsp
+PREHOOK: Output: default@vsp
+POSTHOOK: query: alter table vsp change column i i varchar(3)
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@vsp
+POSTHOOK: Output: default@vsp
+PREHOOK: query: drop table if exists vsp_orc
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists vsp_orc
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table vsp_orc(vs varchar(50), i float) partitioned by(s varchar(50)) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vsp_orc
+POSTHOOK: query: create table vsp_orc(vs varchar(50), i float) partitioned by(s varchar(50)) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vsp_orc
+PREHOOK: query: insert into table vsp_orc partition(s='positive') select string1,float1 from schema_evolution_data
+PREHOOK: type: QUERY
+PREHOOK: Input: default@schema_evolution_data
+PREHOOK: Output: default@vsp_orc@s=positive
+POSTHOOK: query: insert into table vsp_orc partition(s='positive') select string1,float1 from schema_evolution_data
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@schema_evolution_data
+POSTHOOK: Output: default@vsp_orc@s=positive
+POSTHOOK: Lineage: vsp_orc PARTITION(s=positive).i SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ]
+POSTHOOK: Lineage: vsp_orc PARTITION(s=positive).vs SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string1, type:varchar(50), comment:null), ]
+PREHOOK: query: alter table vsp_orc change column vs vs varchar(3)
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@vsp_orc
+PREHOOK: Output: default@vsp_orc
+POSTHOOK: query: alter table vsp_orc change column vs vs varchar(3)
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@vsp_orc
+POSTHOOK: Output: default@vsp_orc
+PREHOOK: query: alter table vsp_orc change column i i varchar(3)
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@vsp_orc
+PREHOOK: Output: default@vsp_orc
+POSTHOOK: query: alter table vsp_orc change column i i varchar(3)
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@vsp_orc
+POSTHOOK: Output: default@vsp_orc
+PREHOOK: query: drop table if exists vsp_parquet
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists vsp_parquet
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table vsp_parquet(vs varchar(50), i float) partitioned by(s varchar(50)) stored as parquet
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vsp_parquet
+POSTHOOK: query: create table vsp_parquet(vs varchar(50), i float) partitioned by(s varchar(50)) stored as parquet
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vsp_parquet
+PREHOOK: query: insert into table vsp_parquet partition(s='positive') select string1,float1 from schema_evolution_data
+PREHOOK: type: QUERY
+PREHOOK: Input: default@schema_evolution_data
+PREHOOK: Output: default@vsp_parquet@s=positive
+POSTHOOK: query: insert into table vsp_parquet partition(s='positive') select string1,float1 from schema_evolution_data
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@schema_evolution_data
+POSTHOOK: Output: default@vsp_parquet@s=positive
+POSTHOOK: Lineage: vsp_parquet PARTITION(s=positive).i SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ]
+POSTHOOK: Lineage: vsp_parquet PARTITION(s=positive).vs SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:string1, type:varchar(50), comment:null), ]
+PREHOOK: query: alter table vsp_parquet change column vs vs varchar(3)
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@vsp_parquet
+PREHOOK: Output: default@vsp_parquet
+POSTHOOK: query: alter table vsp_parquet change column vs vs varchar(3)
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@vsp_parquet
+POSTHOOK: Output: default@vsp_parquet
+PREHOOK: query: alter table vsp_parquet change column i i varchar(3)
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@vsp_parquet
+PREHOOK: Output: default@vsp_parquet
+POSTHOOK: query: alter table vsp_parquet change column i i varchar(3)
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@vsp_parquet
+POSTHOOK: Output: default@vsp_parquet
+PREHOOK: query: select length(vs), length(i), i from vsp_parquet
+PREHOOK: type: QUERY
+PREHOOK: Input: default@vsp_parquet
+PREHOOK: Input: default@vsp_parquet@s=positive
+#### A masked pattern was here ####
+POSTHOOK: query: select length(vs), length(i), i from vsp_parquet
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@vsp_parquet
+POSTHOOK: Input: default@vsp_parquet@s=positive
+#### A masked pattern was here ####
+3 3 Inf
+3 3 -In
+0 NULL NULL
+3 3 -10
+3 NULL NULL
+PREHOOK: query: select length(vs), length(i), i from vsp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@vsp
+PREHOOK: Input: default@vsp@s=positive
+#### A masked pattern was here ####
+POSTHOOK: query: select length(vs), length(i), i from vsp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@vsp
+POSTHOOK: Input: default@vsp@s=positive
+#### A masked pattern was here ####
+3 8 Inf
+3 9 -In
+0 NULL NULL
+3 10 -10
+3 NULL NULL
+PREHOOK: query: select length(vs), length(i), i from vsp_orc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@vsp_orc
+PREHOOK: Input: default@vsp_orc@s=positive
+#### A masked pattern was here ####
+POSTHOOK: query: select length(vs), length(i), i from vsp_orc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@vsp_orc
+POSTHOOK: Input: default@vsp_orc@s=positive
+#### A masked pattern was here ####
+3 3 Inf
+3 3 -In
+0 NULL NULL
+3 3 -10
+3 NULL NULL
+PREHOOK: query: select length(vs) from vsp vsp_llap
+PREHOOK: type: QUERY
+PREHOOK: Input: default@vsp
+PREHOOK: Input: default@vsp@s=positive
+#### A masked pattern was here ####
+POSTHOOK: query: select length(vs) from vsp vsp_llap
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@vsp
+POSTHOOK: Input: default@vsp@s=positive
+#### A masked pattern was here ####
+8
+9
+0
+13
+3
+PREHOOK: query: select length(vs) from vsp_orc vsp_orc_llap
+PREHOOK: type: QUERY
+PREHOOK: Input: default@vsp_orc
+PREHOOK: Input: default@vsp_orc@s=positive
+#### A masked pattern was here ####
+POSTHOOK: query: select length(vs) from vsp_orc vsp_orc_llap
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@vsp_orc
+POSTHOOK: Input: default@vsp_orc@s=positive
+#### A masked pattern was here ####
+8
+9
+0
+13
+3