diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 65ec1b9..068996c 100644 --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -1967,11 +1967,35 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) { "This flag should be set to true to enable vectorized mode of query execution.\n" + "The default value is false."), HIVE_VECTORIZATION_REDUCE_ENABLED("hive.vectorized.execution.reduce.enabled", true, - "This flag should be set to true to enable vectorized mode of the reduce-side of query execution.\n" + - "The default value is true."), + "This flag should be set to true to enable vectorized mode of the reduce-side of query execution.\n" + + "The default value is true."), HIVE_VECTORIZATION_REDUCE_GROUPBY_ENABLED("hive.vectorized.execution.reduce.groupby.enabled", true, - "This flag should be set to true to enable vectorized mode of the reduce-side GROUP BY query execution.\n" + - "The default value is true."), + "This flag should be set to true to enable vectorized mode of the reduce-side GROUP BY query execution.\n" + + "The default value is true."), + HIVE_VECTORIZATION_MAPJOIN_NATIVE_ENABLED("hive.vectorized.execution.mapjoin.native.enabled", true, + "This flag should be set to true to enable native (i.e. 
non-pass through) vectorization\n" + + "of queries using MapJoin.\n" + + "The default value is true."), + HIVE_VECTORIZATION_MAPJOIN_NATIVE_MULTIKEY_ONLY_ENABLED("hive.vectorized.execution.mapjoin.native.multikey.only.enabled", false, + "This flag should be set to true to restrict use of native vector map join hash tables to\n" + + "the MultiKey in queries using MapJoin.\n" + + "The default value is false."), + HIVE_VECTORIZATION_MAPJOIN_NATIVE_MINMAX_ENABLED("hive.vectorized.execution.mapjoin.minmax.enabled", false, + "This flag should be set to true to enable vector map join hash tables to\n" + + "use max / max filtering for integer join queries using MapJoin.\n" + + "The default value is false."), + HIVE_VECTORIZATION_MAPJOIN_NATIVE_OVERFLOW_BATCH_ONLY("hive.vectorized.execution.mapjoin.overflow.batch.only", false, + "This flag should be set to true to make vector map join hash tables\n" + + "use only use the overflow vectorized row batch for join queries using MapJoin.\n" + + "The default value is false."), + HIVE_VECTORIZATION_MAPJOIN_NATIVE_OVERFLOW_REPEATED_THRESHOLD("hive.vectorized.execution.mapjoin.overflow.repeated.threshold", -1, + "The number of small table rows for a match in vector map join hash tables\n" + + "where we use the repeated field optimization in overflow vectorized row batch for join queries using MapJoin.\n" + + "A value of -1 means do use the join result optimization. 
Otherwise, threshold value can be 0 to maximum integer."), + HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED("hive.vectorized.execution.mapjoin.native.fast.hashtable.enabled", false, + "This flag should be set to true to enable use of native fast vector map join hash tables in\n" + + "queries using MapJoin.\n" + + "The default value is false."), HIVE_VECTORIZATION_GROUPBY_CHECKINTERVAL("hive.vectorized.groupby.checkinterval", 100000, "Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed."), HIVE_VECTORIZATION_GROUPBY_MAXENTRIES("hive.vectorized.groupby.maxentries", 1000000, diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java index 38d72f5..52a86c4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java @@ -84,14 +84,14 @@ protected transient MapJoinTableContainer[] mapJoinTables; private transient MapJoinTableContainerSerDe[] mapJoinTableSerdes; private transient boolean hashTblInitedOnce; - private transient ReusableGetAdaptor[] hashMapRowGetters; + protected transient ReusableGetAdaptor[] hashMapRowGetters; private UnwrapRowContainer[] unwrapContainer; private transient Configuration hconf; private transient boolean hybridMapJoinLeftover; // whether there's spilled data to be processed - private transient MapJoinBytesTableContainer currentSmallTable; // reloaded hashmap from disk - private transient int tag; // big table alias - private transient int smallTable; // small table alias + protected transient MapJoinBytesTableContainer currentSmallTable; // reloaded hashmap from disk + protected transient int tag; // big table alias + protected transient int smallTable; // small table alias public MapJoinOperator() { } @@ -115,6 +115,10 @@ public void startGroup() throws HiveException { defaultStartGroup(); } + protected 
HashTableLoader getHashTableLoader(Configuration hconf) { + return HashTableLoaderFactory.getLoader(hconf); + } + @Override protected Collection> initializeOp(Configuration hconf) throws HiveException { this.hconf = hconf; @@ -133,7 +137,7 @@ public void startGroup() throws HiveException { + "__HASH_MAP_"+this.getOperatorId()+"_container"; cache = ObjectCacheFactory.getCache(hconf); - loader = HashTableLoaderFactory.getLoader(hconf); + loader = getHashTableLoader(hconf); hashMapRowGetters = null; @@ -266,7 +270,7 @@ public void generateMapMetaData() throws HiveException { } } - private Pair loadHashTable( + protected Pair loadHashTable( ExecMapperContext mapContext, MapredContext mrContext) throws HiveException { loadCalled = true; @@ -280,6 +284,7 @@ public void generateMapMetaData() throws HiveException { } perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.LOAD_HASHTABLE); + LOG.info(this.getClass().getSimpleName() + " loading from loader " + loader.getClass().getSimpleName() + " cacheKey " + cacheKey); loader.init(mapContext, mrContext, hconf, this); long memUsage = (long)(MapJoinMemoryExhaustionHandler.getMaxHeapSize() * conf.getHashTableMemoryUsage()); @@ -518,7 +523,7 @@ private void continueProcess(HashPartition partition, HybridHashTableContainer h * @throws HiveException * @throws SerDeException */ - private void reloadHashTable(HashPartition partition, + protected void reloadHashTable(HashPartition partition, HybridHashTableContainer hybridHtContainer) throws IOException, ClassNotFoundException, HiveException, SerDeException { @@ -566,8 +571,9 @@ private void reloadHashTable(HashPartition partition, * Iterate over the big table row container and feed process() with leftover rows * @param partition the hash partition being brought back to memory at the moment * @throws HiveException + * @throws IOException */ - protected void reProcessBigTable(HashPartition partition) throws HiveException { + protected void reProcessBigTable(HashPartition partition) throws 
HiveException, IOException { ObjectContainer bigTable = partition.getMatchfileObjContainer(); while (bigTable.hasNext()) { Object row = bigTable.next(); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java index 91e8a02..c4554a7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java @@ -153,21 +153,25 @@ public OpTuple(Class descClass, Class> opClass) { } } + public static Operator getVectorOperator( + Class> opClass, T conf, VectorizationContext vContext) throws HiveException { + try { + Operator op = (Operator) opClass.getDeclaredConstructor( + VectorizationContext.class, OperatorDesc.class).newInstance( + vContext, conf); + return op; + } catch (Exception e) { + e.printStackTrace(); + throw new HiveException(e); + } + } public static Operator getVectorOperator(T conf, VectorizationContext vContext) throws HiveException { Class descClass = (Class) conf.getClass(); for (OpTuple o : vectorOpvec) { if (o.descClass == descClass) { - try { - Operator op = (Operator) o.opClass.getDeclaredConstructor( - VectorizationContext.class, OperatorDesc.class).newInstance( - vContext, conf); - return op; - } catch (Exception e) { - e.printStackTrace(); - throw new HiveException(e); - } + return getVectorOperator(o.opClass, conf, vContext); } } throw new HiveException("No vector operator for descriptor class " diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java index 67477c2..31713a9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java @@ -35,9 +35,11 @@ import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator; import 
org.apache.hadoop.hive.ql.exec.JoinUtil; import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinBytesTableContainer.KeyValueHelper; import org.apache.hadoop.hive.ql.exec.vector.VectorHashKeyWrapper; import org.apache.hadoop.hive.ql.exec.vector.VectorHashKeyWrapperBatch; import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinRowBytesContainer; import org.apache.hadoop.hive.ql.io.HiveKey; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.ByteStream.Output; @@ -65,7 +67,8 @@ * Partitions that can fit in memory will be processed first, and then every spilled partition will * be restored and processed one by one. */ -public class HybridHashTableContainer implements MapJoinTableContainer { +public class HybridHashTableContainer + implements MapJoinTableContainer, MapJoinTableContainerDirectAccess { private static final Log LOG = LogFactory.getLog(HybridHashTableContainer.class); private final HashPartition[] hashPartitions; // an array of partitions holding the triplets @@ -83,6 +86,7 @@ private LazyBinaryStructObjectInspector internalValueOi; private boolean[] sortableSortOrders; private MapJoinBytesTableContainer.KeyValueHelper writeHelper; + private MapJoinBytesTableContainer.DirectKeyValueWriter directWriteHelper; private final List EMPTY_LIST = new ArrayList(0); @@ -94,6 +98,8 @@ BytesBytesMultiHashMap hashMap; // In memory hashMap KeyValueContainer sidefileKVContainer; // Stores small table key/value pairs ObjectContainer matchfileObjContainer; // Stores big table rows + VectorMapJoinRowBytesContainer matchfileRowBytesContainer; + // Stores big table rows as bytes for native vector map join. Path hashMapLocalPath; // Local file system path for spilled hashMap boolean hashMapOnDisk; // Status of hashMap. 
true: on disk, false: in memory boolean hashMapSpilledOnCreation; // When there's no enough memory, cannot create hashMap @@ -162,6 +168,14 @@ public ObjectContainer getMatchfileObjContainer() { return matchfileObjContainer; } + /* Get the big table row bytes container for native vector map join */ + public VectorMapJoinRowBytesContainer getMatchfileRowBytesContainer() { + if (matchfileRowBytesContainer == null) { + matchfileRowBytesContainer = new VectorMapJoinRowBytesContainer(); + } + return matchfileRowBytesContainer; + } + /* Check if hashmap is on disk or in memory */ public boolean isHashMapOnDisk() { return hashMapOnDisk; @@ -272,9 +286,14 @@ public MapJoinKey putRow(MapJoinObjectSerDeContext keyContext, Writable currentK } } writeHelper.setKeyValue(currentKey, currentValue); + return internalPutRow(writeHelper, currentKey, currentValue); + } + + private MapJoinKey internalPutRow(KeyValueHelper keyValueHelper, + Writable currentKey, Writable currentValue) throws SerDeException, IOException { // Next, put row into corresponding hash partition - int keyHash = writeHelper.getHashFromKey(); + int keyHash = keyValueHelper.getHashFromKey(); int partitionId = keyHash & (hashPartitions.length - 1); HashPartition hashPartition = hashPartitions[partitionId]; @@ -282,7 +301,7 @@ public MapJoinKey putRow(MapJoinObjectSerDeContext keyContext, Writable currentK KeyValueContainer kvContainer = hashPartition.getSidefileKVContainer(); kvContainer.add((HiveKey) currentKey, (BytesWritable) currentValue); } else { - hashPartition.hashMap.put(writeHelper, keyHash); // Pass along hashcode to avoid recalculation + hashPartition.hashMap.put(keyValueHelper, keyHash); // Pass along hashcode to avoid recalculation totalInMemRowCount++; if ((totalInMemRowCount & (this.memoryCheckFrequency - 1)) == 0 && // check periodically @@ -499,9 +518,21 @@ public void seal() { } } + + // Direct access interfaces. 
+ + @Override + public void put(Writable currentKey, Writable currentValue) throws SerDeException, IOException { + if (directWriteHelper == null) { + directWriteHelper = new MapJoinBytesTableContainer.DirectKeyValueWriter(); + } + directWriteHelper.setKeyValue(currentKey, currentValue); + internalPutRow(directWriteHelper, currentKey, currentValue); + } + /** Implementation of ReusableGetAdaptor that has Output for key serialization; row * container is also created once and reused for every row. */ - private class GetAdaptor implements ReusableGetAdaptor { + private class GetAdaptor implements ReusableGetAdaptor, ReusableGetAdaptorDirectAccess { private Object[] currentKey; private boolean[] nulls; @@ -581,6 +612,19 @@ public MapJoinRowContainer getCurrentRows() { public Object[] getCurrentKey() { return currentKey; } + + // Direct access interfaces. + + @Override + public JoinUtil.JoinResult setDirect(byte[] bytes, int offset, int length, + BytesBytesMultiHashMap.Result hashMapResult) { + return currentValue.setDirect(bytes, offset, length, hashMapResult); + } + + @Override + public int directSpillPartitionId() { + return currentValue.directSpillPartitionId(); + } } /** Row container that gets and deserializes the rows on demand from bytes provided. */ @@ -743,6 +787,34 @@ public void addRow(Object[] value) { public void write(MapJoinObjectSerDeContext valueContext, ObjectOutputStream out) { throw new RuntimeException(this.getClass().getCanonicalName() + " cannot be serialized"); } + + // Direct access. 
+ + public JoinUtil.JoinResult setDirect(byte[] bytes, int offset, int length, + BytesBytesMultiHashMap.Result hashMapResult) { + + int keyHash = WriteBuffers.murmurHash(bytes, offset, length); + partitionId = keyHash & (hashPartitions.length - 1); + + // If the target hash table is on disk, spill this row to disk as well to be processed later + if (isOnDisk(partitionId)) { + return JoinUtil.JoinResult.SPILL; + } + else { + aliasFilter = hashPartitions[partitionId].hashMap.getValueResult(bytes, offset, length, hashMapResult); + dummyRow = null; + if (hashMapResult.hasRows()) { + return JoinUtil.JoinResult.MATCH; + } else { + aliasFilter = (byte) 0xff; + return JoinUtil.JoinResult.NOMATCH; + } + } + } + + public int directSpillPartitionId() { + return partitionId; + } } @Override diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java index 3fdabea..df306eb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator; import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.JoinUtil.JoinResult; import org.apache.hadoop.hive.ql.exec.vector.VectorHashKeyWrapper; import org.apache.hadoop.hive.ql.exec.vector.VectorHashKeyWrapperBatch; import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter; @@ -54,6 +55,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; import org.apache.hadoop.io.BinaryComparable; +import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.Writable; /** @@ -61,7 +63,8 @@ * BytesBytesMultiHashMap, with very low memory overhead. 
However, * there may be some perf overhead when retrieving rows. */ -public class MapJoinBytesTableContainer implements MapJoinTableContainer { +public class MapJoinBytesTableContainer + implements MapJoinTableContainer, MapJoinTableContainerDirectAccess { private static final Log LOG = LogFactory.getLog(MapJoinTableContainer.class); private final BytesBytesMultiHashMap hashMap; @@ -75,6 +78,7 @@ */ private boolean[] sortableSortOrders; private KeyValueHelper writeHelper; + private DirectKeyValueWriter directWriteHelper; private final List EMPTY_LIST = new ArrayList(0); @@ -294,6 +298,48 @@ public byte updateStateByte(Byte previousValue) { } } + /* + * An implementation of KvSource that can handle key and value as BytesWritable objects. + */ + protected static class DirectKeyValueWriter implements KeyValueHelper { + + private BytesWritable key; + private BytesWritable val; + + @Override + public void setKeyValue(Writable key, Writable val) throws SerDeException { + this.key = (BytesWritable) key; + this.val = (BytesWritable) val; + } + + @Override + public void writeKey(RandomAccessOutput dest) throws SerDeException { + byte[] keyBytes = key.getBytes(); + int keyLength = key.getLength(); + dest.write(keyBytes, 0, keyLength); + } + + @Override + public void writeValue(RandomAccessOutput dest) throws SerDeException { + byte[] valueBytes = val.getBytes(); + int valueLength = val.getLength(); + dest.write(valueBytes, 0 , valueLength); + } + + @Override + public byte updateStateByte(Byte previousValue) { + // Not used by the direct access client -- native vector map join. 
+ throw new UnsupportedOperationException("Updating the state by not supported"); + } + + @Override + public int getHashFromKey() throws SerDeException { + byte[] keyBytes = key.getBytes(); + int keyLength = key.getLength(); + return WriteBuffers.murmurHash(keyBytes, 0, keyLength); + } + } + @SuppressWarnings("deprecation") @Override public MapJoinKey putRow(MapJoinObjectSerDeContext keyContext, Writable currentKey, @@ -342,9 +388,20 @@ public void seal() { hashMap.seal(); } + // Direct access interfaces. + + @Override + public void put(Writable currentKey, Writable currentValue) throws SerDeException { + if (directWriteHelper == null) { + directWriteHelper = new DirectKeyValueWriter(); + } + directWriteHelper.setKeyValue(currentKey, currentValue); + hashMap.put(directWriteHelper, -1); + } + /** Implementation of ReusableGetAdaptor that has Output for key serialization; row * container is also created once and reused for every row. */ - private class GetAdaptor implements ReusableGetAdaptor { + private class GetAdaptor implements ReusableGetAdaptor, ReusableGetAdaptorDirectAccess { private Object[] currentKey; private boolean[] nulls; @@ -424,6 +481,19 @@ public MapJoinRowContainer getCurrentRows() { public Object[] getCurrentKey() { return currentKey; } + + // Direct access interfaces. + + @Override + public JoinUtil.JoinResult setDirect(byte[] bytes, int offset, int length, + BytesBytesMultiHashMap.Result hashMapResult) { + return currentValue.setDirect(bytes, offset, length, hashMapResult); + } + + @Override + public int directSpillPartitionId() { + throw new UnsupportedOperationException("Getting the spill hash partition not supported"); + } } /** Row container that gets and deserializes the rows on demand from bytes provided. 
*/ @@ -573,6 +643,22 @@ public void addRow(Object[] value) { public void write(MapJoinObjectSerDeContext valueContext, ObjectOutputStream out) { throw new RuntimeException(this.getClass().getCanonicalName() + " cannot be serialized"); } + + // Direct access. + + public JoinUtil.JoinResult setDirect(byte[] bytes, int offset, int length, + BytesBytesMultiHashMap.Result hashMapResult) { + + int keyHash = WriteBuffers.murmurHash(bytes, offset, length); + aliasFilter = hashMap.getValueResult(bytes, offset, length, hashMapResult); + dummyRow = null; + if (hashMapResult.hasRows()) { + return JoinUtil.JoinResult.MATCH; + } else { + aliasFilter = (byte) 0xff; + return JoinUtil.JoinResult.NOMATCH; + } + } } public static boolean isSupportedKey(ObjectInspector keyOi) { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java index 0738842..02f25e7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java @@ -42,6 +42,9 @@ import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory; import org.apache.hadoop.hive.serde2.objectinspector.StructField; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; import org.apache.hadoop.io.Writable; /** @@ -96,6 +99,17 @@ public static boolean isSupportedField(ObjectInspector foi) { return true; } + public static boolean isSupportedField(String typeName) { + TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeName); + + if (typeInfo.getCategory() != Category.PRIMITIVE) return false; // not supported + PrimitiveTypeInfo primitiveTypeInfo = (PrimitiveTypeInfo) typeInfo; + PrimitiveCategory pc = 
primitiveTypeInfo.getPrimitiveCategory(); + if (!SUPPORTED_PRIMITIVES.contains(pc)) return false; // not supported + return true; + } + + public static MapJoinKey readFromVector(Output output, MapJoinKey key, Object[] keyObject, List keyOIs, boolean mayReuseKey) throws HiveException { MapJoinKeyObject result = mayReuseKey ? (MapJoinKeyObject)key : new MapJoinKeyObject(); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerDirectAccess.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerDirectAccess.java new file mode 100644 index 0000000..2164d38 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerDirectAccess.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.persistence; + + +import java.io.IOException; + +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.io.Writable; + +public interface MapJoinTableContainerDirectAccess { + + void put(Writable currentKey, Writable currentValue) throws SerDeException, IOException; + +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/ReusableGetAdaptorDirectAccess.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/ReusableGetAdaptorDirectAccess.java new file mode 100644 index 0000000..0685d84 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/ReusableGetAdaptorDirectAccess.java @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.persistence; + + +import org.apache.hadoop.hive.ql.exec.JoinUtil.JoinResult; + +public interface ReusableGetAdaptorDirectAccess { + + JoinResult setDirect(byte[] bytes, int offset, int length, + BytesBytesMultiHashMap.Result hashMapResult); + + int directSpillPartitionId(); +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java index 9034253..ba5a797 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java @@ -110,6 +110,8 @@ public void load(MapJoinTableContainer[] mapJoinTables, if (!MapJoinBytesTableContainer.isSupportedKey(keyOi)) { if (isFirstKey) { useOptimizedTables = false; + LOG.info(describeOi("Not using optimized hash table. " + + "Only a subset of mapjoin keys is supported. Unsupported key: ", keyOi)); } else { throw new HiveException(describeOi( "Only a subset of mapjoin keys is supported. 
Unsupported key: ", keyOi)); @@ -125,6 +127,7 @@ public void load(MapJoinTableContainer[] mapJoinTables, desc.getParentDataSizes().get(pos)) : new MapJoinBytesTableContainer(hconf, valCtx, keyCount, memUsage)) : new HashMapWrapper(hconf, keyCount); + LOG.info("Using tableContainer " + tableContainer.getClass().getSimpleName()); while (kvReader.next()) { tableContainer.putRow(keyCtx, (Writable)kvReader.getCurrentKey(), diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java index 1236df5..cdabe3a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.exec.tez; +import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; @@ -28,6 +29,9 @@ import org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator; import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.VectorDeserializeRow; import org.apache.hadoop.hive.ql.exec.vector.VectorizedBatchUtil; import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx; @@ -40,6 +44,9 @@ import org.apache.hadoop.hive.serde2.SerDe; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.SerDeUtils; +import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe; +import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableDeserializeRead; +import org.apache.hadoop.hive.serde2.lazybinary.fast.LazyBinaryDeserializeRead; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import 
org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; @@ -47,7 +54,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructField; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.hadoop.io.BytesWritable; -import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; @@ -85,13 +91,15 @@ private boolean vectorized = false; - private DataOutputBuffer keyBuffer; - private DataOutputBuffer valueBuffer; + private VectorDeserializeRow keyBinarySortableDeserializeToRow; + + private VectorDeserializeRow valueLazyBinaryDeserializeToRow; + private VectorizedRowBatchCtx batchContext; private VectorizedRowBatch batch; // number of columns pertaining to keys in a vectorized row batch - private int keysColumnOffset; + private int firstValueColumnOffset; private final int BATCH_SIZE = VectorizedRowBatch.DEFAULT_SIZE; private StructObjectInspector keyStructInspector; @@ -134,9 +142,7 @@ void init(JobConf jconf, Operator reducer, boolean vectorized, TableDesc keyT if(vectorized) { keyStructInspector = (StructObjectInspector) keyObjectInspector; - keysColumnOffset = keyStructInspector.getAllStructFieldRefs().size(); - keyBuffer = new DataOutputBuffer(); - valueBuffer = new DataOutputBuffer(); + firstValueColumnOffset = keyStructInspector.getAllStructFieldRefs().size(); } // We should initialize the SerDe with the TypeInfo when available. 
@@ -153,7 +159,7 @@ void init(JobConf jconf, Operator reducer, boolean vectorized, TableDesc keyT /* vectorization only works with struct object inspectors */ valueStructInspectors = (StructObjectInspector) valueObjectInspector; - final int totalColumns = keysColumnOffset + + final int totalColumns = firstValueColumnOffset + valueStructInspectors.getAllStructFieldRefs().size(); valueStringWriters = new ArrayList(totalColumns); valueStringWriters.addAll(Arrays @@ -183,6 +189,36 @@ void init(JobConf jconf, Operator reducer, boolean vectorized, TableDesc keyT batchContext = new VectorizedRowBatchCtx(); batchContext.init(vectorScratchColumnTypeMap, (StructObjectInspector) rowObjectInspector); batch = batchContext.createVectorizedRowBatch(); + + // Setup vectorized deserialization for the key and value. + BinarySortableSerDe binarySortableSerDe = (BinarySortableSerDe) inputKeyDeserializer; + + keyBinarySortableDeserializeToRow = + new VectorDeserializeRow( + new BinarySortableDeserializeRead( + VectorizedBatchUtil.primitiveTypeInfosFromStructObjectInspector( + keyStructInspector), + binarySortableSerDe.getSortOrders())); + keyBinarySortableDeserializeToRow.init(0); + + final int valuesSize = valueStructInspectors.getAllStructFieldRefs().size(); + if (valuesSize > 0) { + valueLazyBinaryDeserializeToRow = + new VectorDeserializeRow( + new LazyBinaryDeserializeRead( + VectorizedBatchUtil.primitiveTypeInfosFromStructObjectInspector( + valueStructInspectors))); + valueLazyBinaryDeserializeToRow.init(firstValueColumnOffset); + + // Create data buffers for value bytes column vectors. 
+ for (int i = firstValueColumnOffset; i < batch.numCols; i++) { + ColumnVector colVector = batch.cols[i]; + if (colVector instanceof BytesColumnVector) { + BytesColumnVector bytesColumnVector = (BytesColumnVector) colVector; + bytesColumnVector.initBuffer(); + } + } + } } else { ois.add(keyObjectInspector); ois.add(valueObjectInspector); @@ -209,9 +245,12 @@ public final boolean isGrouped() { @Override public boolean pushRecord() throws HiveException { - BytesWritable keyWritable; - if (!vectorized && groupIterator.hasNext()) { + if (vectorized) { + return pushRecordVector(); + } + + if (groupIterator.hasNext()) { // if we have records left in the group we push one of those groupIterator.next(); return true; @@ -220,11 +259,11 @@ public boolean pushRecord() throws HiveException { try { if (!reader.next()) { return false; - } else { - keyWritable = (BytesWritable) reader.getCurrentKey(); - valueWritables = reader.getCurrentValues(); } + BytesWritable keyWritable = (BytesWritable) reader.getCurrentKey(); + valueWritables = reader.getCurrentValues(); + //Set the key, check if this is a new group or same group try { keyObject = inputKeyDeserializer.deserialize(keyWritable); @@ -248,13 +287,9 @@ public boolean pushRecord() throws HiveException { reducer.setGroupKeyObject(keyObject); } - if(vectorized) { - processVectors(valueWritables, tag); - } else { - groupIterator.initialize(valueWritables, keyObject, tag); - if (groupIterator.hasNext()) { - groupIterator.next(); // push first record of group - } + groupIterator.initialize(valueWritables, keyObject, tag); + if (groupIterator.hasNext()) { + groupIterator.next(); // push first record of group } return true; } catch (Throwable e) { @@ -338,37 +373,82 @@ public void next() throws HiveException { } } + private boolean pushRecordVector() { + try { + if (!reader.next()) { + return false; + } + + BytesWritable keyWritable = (BytesWritable) reader.getCurrentKey(); + valueWritables = reader.getCurrentValues(); + + // Check 
if this is a new group or same group + if (handleGroupKey && !keyWritable.equals(this.groupKey)) { + // If a operator wants to do some work at the beginning of a group + if (groupKey == null) { // the first group + this.groupKey = new BytesWritable(); + } else { + // If a operator wants to do some work at the end of a group + reducer.endGroup(); + } + + groupKey.set(keyWritable.getBytes(), 0, keyWritable.getLength()); + reducer.startGroup(); + } + + processVectorGroup(keyWritable, valueWritables, tag); + return true; + } catch (Throwable e) { + abort = true; + if (e instanceof OutOfMemoryError) { + // Don't create a new object if we are already out of memory + throw (OutOfMemoryError) e; + } else { + l4j.fatal(StringUtils.stringifyException(e)); + throw new RuntimeException(e); + } + } + } + /** * @param values * @return true if it is not done and can take more inputs */ - private void processVectors(Iterable values, byte tag) throws HiveException { - /* deserialize key into columns */ - VectorizedBatchUtil.addRowToBatchFrom(keyObject, keyStructInspector, - 0, 0, batch, keyBuffer); - for(int i = 0; i < keysColumnOffset; i++) { + private void processVectorGroup(BytesWritable keyWritable, + Iterable values, byte tag) throws HiveException, IOException { + + // Deserialize key into vector row columns. + // Since we referencing byte column vector byte arrays by reference, we don't need + // a data buffer. 
+ byte[] keyBytes = keyWritable.getBytes(); + int keyLength = keyWritable.getLength(); + keyBinarySortableDeserializeToRow.setBytes(keyBytes, 0, keyLength); + keyBinarySortableDeserializeToRow.deserializeByValue(batch, 0); + for(int i = 0; i < firstValueColumnOffset; i++) { VectorizedBatchUtil.setRepeatingColumn(batch, i); } int rowIdx = 0; try { for (Object value : values) { - /* deserialize value into columns */ - BytesWritable valueWritable = (BytesWritable) value; - Object valueObj = deserializeValue(valueWritable, tag); - - VectorizedBatchUtil.addRowToBatchFrom(valueObj, valueStructInspectors, - rowIdx, keysColumnOffset, batch, valueBuffer); + if (valueLazyBinaryDeserializeToRow != null) { + // Deserialize value into vector row columns. + BytesWritable valueWritable = (BytesWritable) value; + byte[] valueBytes = valueWritable.getBytes(); + int valueLength = valueWritable.getLength(); + valueLazyBinaryDeserializeToRow.setBytes(valueBytes, 0, valueLength); + valueLazyBinaryDeserializeToRow.deserializeByValue(batch, rowIdx); + } rowIdx++; if (rowIdx >= BATCH_SIZE) { VectorizedBatchUtil.setBatchSize(batch, rowIdx); reducer.process(batch, tag); // Reset just the value columns and value buffer. - for (int i = keysColumnOffset; i < batch.numCols; i++) { + for (int i = firstValueColumnOffset; i < batch.numCols; i++) { + // Note that reset also resets the data buffer for bytes column vectors. 
batch.cols[i].reset(); } - valueBuffer.reset(); rowIdx = 0; } } @@ -378,8 +458,6 @@ private void processVectors(Iterable values, byte tag) throws HiveExcept reducer.process(batch, tag); } batch.reset(); - keyBuffer.reset(); - valueBuffer.reset(); } catch (Exception e) { String rowString = null; try { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAppMasterEventOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAppMasterEventOperator.java index 7aa279a..df82d21 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAppMasterEventOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAppMasterEventOperator.java @@ -23,14 +23,14 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.ql.exec.AppMasterEventOperator; -import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter; -import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriterFactory; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.AppMasterEventDesc; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.hadoop.io.ObjectWritable; +import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.hadoop.io.Writable; /** @@ -40,14 +40,23 @@ private static final long serialVersionUID = 1L; - protected transient Object[] singleRow; + private VectorizationContext vContext; + + // The above members are initialized by the constructor and must not be + // transient. 
+ //--------------------------------------------------------------------------- - protected transient VectorExpressionWriter[] valueWriters; + private transient boolean firstBatch; + + private transient VectorExtractRowDynBatch vectorExtractRowDynBatch; + + protected transient Object[] singleRow; - public VectorAppMasterEventOperator(VectorizationContext context, + public VectorAppMasterEventOperator(VectorizationContext vContext, OperatorDesc conf) { super(); this.conf = (AppMasterEventDesc) conf; + this.vContext = vContext; } public VectorAppMasterEventOperator() { @@ -55,70 +64,76 @@ public VectorAppMasterEventOperator() { @Override public Collection> initializeOp(Configuration hconf) throws HiveException { + + // We need a input object inspector that is for the row we will extract out of the + // vectorized row batch, not for example, an original inspector for an ORC table, etc. + inputObjInspectors[0] = + VectorizedBatchUtil.convertToStandardStructObjectInspector((StructObjectInspector) inputObjInspectors[0]); + + // Call AppMasterEventOperator with new input inspector. 
Collection> result = super.initializeOp(hconf); - valueWriters = VectorExpressionWriterFactory.getExpressionWriters( - (StructObjectInspector) inputObjInspectors[0]); - singleRow = new Object[valueWriters.length]; + assert result.isEmpty(); + + firstBatch = true; + return result; } @Override public void process(Object data, int tag) throws HiveException { - VectorizedRowBatch vrg = (VectorizedRowBatch) data; + if (hasReachedMaxSize) { + return; + } - Writable [] records = null; - Writable recordValue = null; - boolean vectorizedSerde = false; + VectorizedRowBatch batch = (VectorizedRowBatch) data; + if (firstBatch) { + vectorExtractRowDynBatch = new VectorExtractRowDynBatch(); + vectorExtractRowDynBatch.init((StructObjectInspector) inputObjInspectors[0], vContext.getProjectedColumns()); - try { - if (serializer instanceof VectorizedSerde) { - recordValue = ((VectorizedSerde) serializer).serializeVector(vrg, - inputObjInspectors[0]); - records = (Writable[]) ((ObjectWritable) recordValue).get(); - vectorizedSerde = true; - } - } catch (SerDeException e1) { - throw new HiveException(e1); + singleRow = new Object[vectorExtractRowDynBatch.getCount()]; + + firstBatch = false; } - for (int i = 0; i < vrg.size; i++) { - Writable row = null; - if (vectorizedSerde) { - row = records[i]; - } else { - if (vrg.valueWriters == null) { - vrg.setValueWriters(this.valueWriters); - } - try { - row = serializer.serialize(getRowObject(vrg, i), inputObjInspectors[0]); - } catch (SerDeException ex) { - throw new HiveException(ex); + vectorExtractRowDynBatch.setBatchOnEntry(batch); + + ObjectInspector rowInspector = inputObjInspectors[0]; + try { + Writable writableRow; + if (batch.selectedInUse) { + int selected[] = batch.selected; + for (int logical = 0 ; logical < batch.size; logical++) { + int batchIndex = selected[logical]; + vectorExtractRowDynBatch.extractRow(batchIndex, singleRow); + writableRow = serializer.serialize(singleRow, rowInspector); + writableRow.write(buffer); + if 
(buffer.getLength() > MAX_SIZE) { + LOG.info("Disabling AM events. Buffer size too large: " + buffer.getLength()); + hasReachedMaxSize = true; + buffer = null; + break; + } } - } - try { - row.write(buffer); - if (buffer.getLength() > MAX_SIZE) { - LOG.info("Disabling AM events. Buffer size too large: " + buffer.getLength()); - hasReachedMaxSize = true; - buffer = null; + } else { + for (int batchIndex = 0 ; batchIndex < batch.size; batchIndex++) { + vectorExtractRowDynBatch.extractRow(batchIndex, singleRow); + writableRow = serializer.serialize(singleRow, rowInspector); + writableRow.write(buffer); + if (buffer.getLength() > MAX_SIZE) { + LOG.info("Disabling AM events. Buffer size too large: " + buffer.getLength()); + hasReachedMaxSize = true; + buffer = null; + break; + } } - } catch (Exception e) { - throw new HiveException(e); } + } catch (Exception e) { + throw new HiveException(e); } - } - private Object[] getRowObject(VectorizedRowBatch vrg, int rowIndex) - throws HiveException { - int batchIndex = rowIndex; - if (vrg.selectedInUse) { - batchIndex = vrg.selected[rowIndex]; - } - for (int i = 0; i < vrg.projectionSize; i++) { - ColumnVector vectorColumn = vrg.cols[vrg.projectedColumns[i]]; - singleRow[i] = vrg.valueWriters[i].writeValue(vectorColumn, batchIndex); - } - return singleRow; + forward(data, rowInspector); + + vectorExtractRowDynBatch.forgetBatchOnExit(); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnOrderedMap.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnOrderedMap.java index 96a4f83..d2eb26a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnOrderedMap.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnOrderedMap.java @@ -84,7 +84,7 @@ public VectorColumnOrderedMap() { public void add(int orderedColumn, int valueColumn, String typeName) { if (orderedTreeMap.containsKey(orderedColumn)) { - throw new Error("Duplicate column " + orderedColumn + " in 
ordered column map"); + throw new RuntimeException("Duplicate column " + orderedColumn + " in ordered column map"); } orderedTreeMap.put(orderedColumn, new Value(valueColumn, typeName)); } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorCopyRow.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorCopyRow.java index e010e45..0058141 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorCopyRow.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorCopyRow.java @@ -210,7 +210,7 @@ public void init(VectorColumnMapping columnMapping) { } else if (VectorizationContext.decimalTypePattern.matcher(typeName).matches()){ copyRowByValue = new DecimalCopyRow(inputColumn, outputColumn); } else { - throw new Error("Cannot allocate vector copy row for " + typeName); + throw new RuntimeException("Cannot allocate vector copy row for " + typeName); } subRowToBatchCopiersByValue[i] = copyRowByValue; if (copyRowByReference == null) { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorDeserializeRow.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorDeserializeRow.java index 22106c6..8452abd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorDeserializeRow.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorDeserializeRow.java @@ -651,11 +651,22 @@ public void init(int[] outputColumns) throws HiveException { } } + public void init(List outputColumns) throws HiveException { + + readersByValue = new Reader[primitiveTypeInfos.length]; + readersByReference = new Reader[primitiveTypeInfos.length]; + + for (int i = 0; i < primitiveTypeInfos.length; i++) { + int outputColumn = outputColumns.get(i); + addReader(i, outputColumn); + } + } + public void init(int startColumn) throws HiveException { readersByValue = new Reader[primitiveTypeInfos.length]; readersByReference = new Reader[primitiveTypeInfos.length]; - + for (int i = 0; i < primitiveTypeInfos.length; i++) { int outputColumn = 
startColumn + i; addReader(i, outputColumn); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java index bfa8134..2ccc9a5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java @@ -23,12 +23,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.ql.exec.FileSinkOperator; -import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter; -import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriterFactory; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.FileSinkDesc; import org.apache.hadoop.hive.ql.plan.OperatorDesc; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; /** @@ -38,14 +35,23 @@ private static final long serialVersionUID = 1L; - protected transient Object[] singleRow; + private VectorizationContext vContext; + + // The above members are initialized by the constructor and must not be + // transient. 
+ //--------------------------------------------------------------------------- - protected transient VectorExpressionWriter[] valueWriters; + private transient boolean firstBatch; - public VectorFileSinkOperator(VectorizationContext context, + private transient VectorExtractRowDynBatch vectorExtractRowDynBatch; + + protected transient Object[] singleRow; + + public VectorFileSinkOperator(VectorizationContext vContext, OperatorDesc conf) { super(); this.conf = (FileSinkDesc) conf; + this.vContext = vContext; } public VectorFileSinkOperator() { @@ -54,43 +60,49 @@ public VectorFileSinkOperator() { @Override protected Collection> initializeOp(Configuration hconf) throws HiveException { + // We need a input object inspector that is for the row we will extract out of the // vectorized row batch, not for example, an original inspector for an ORC table, etc. - VectorExpressionWriterFactory.processVectorInspector( - (StructObjectInspector) inputObjInspectors[0], - new VectorExpressionWriterFactory.SingleOIDClosure() { - @Override - public void assign(VectorExpressionWriter[] writers, - ObjectInspector objectInspector) { - valueWriters = writers; - inputObjInspectors[0] = objectInspector; - } - }); - singleRow = new Object[valueWriters.length]; + inputObjInspectors[0] = + VectorizedBatchUtil.convertToStandardStructObjectInspector((StructObjectInspector) inputObjInspectors[0]); // Call FileSinkOperator with new input inspector. 
- return super.initializeOp(hconf); + Collection> result = super.initializeOp(hconf); + assert result.isEmpty(); + + firstBatch = true; + + return result; } @Override public void process(Object data, int tag) throws HiveException { - VectorizedRowBatch vrg = (VectorizedRowBatch)data; - for (int i = 0; i < vrg.size; i++) { - Object[] row = getRowObject(vrg, i); - super.process(row, tag); - } - } + VectorizedRowBatch batch = (VectorizedRowBatch) data; + if (firstBatch) { + vectorExtractRowDynBatch = new VectorExtractRowDynBatch(); + vectorExtractRowDynBatch.init((StructObjectInspector) inputObjInspectors[0], vContext.getProjectedColumns()); + + singleRow = new Object[vectorExtractRowDynBatch.getCount()]; - private Object[] getRowObject(VectorizedRowBatch vrg, int rowIndex) - throws HiveException { - int batchIndex = rowIndex; - if (vrg.selectedInUse) { - batchIndex = vrg.selected[rowIndex]; + firstBatch = false; } - for (int i = 0; i < vrg.projectionSize; i++) { - ColumnVector vectorColumn = vrg.cols[vrg.projectedColumns[i]]; - singleRow[i] = valueWriters[i].writeValue(vectorColumn, batchIndex); + + vectorExtractRowDynBatch.setBatchOnEntry(batch); + + if (batch.selectedInUse) { + int selected[] = batch.selected; + for (int logical = 0 ; logical < batch.size; logical++) { + int batchIndex = selected[logical]; + vectorExtractRowDynBatch.extractRow(batchIndex, singleRow); + super.process(singleRow, tag); + } + } else { + for (int batchIndex = 0 ; batchIndex < batch.size; batchIndex++) { + vectorExtractRowDynBatch.extractRow(batchIndex, singleRow); + super.process(singleRow, tag); + } } - return singleRow; + + vectorExtractRowDynBatch.forgetBatchOnExit(); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java index bd4fb42..39a83e3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java +++ 
ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java @@ -102,7 +102,7 @@ private transient VectorizedRowBatch outputBatch; private transient VectorizedRowBatchCtx vrbCtx; - private transient VectorColumnAssign[] vectorColumnAssign; + private transient VectorAssignRowSameBatch vectorAssignRowSameBatch; private transient int numEntriesHashTable; @@ -776,6 +776,7 @@ public VectorGroupByOperator() { @Override protected Collection> initializeOp(Configuration hconf) throws HiveException { Collection> result = super.initializeOp(hconf); + assert result.isEmpty(); List objectInspectors = new ArrayList(); @@ -812,11 +813,12 @@ public VectorGroupByOperator() { outputObjInspector = ObjectInspectorFactory.getStandardStructObjectInspector( outputFieldNames, objectInspectors); if (isVectorOutput) { - vrbCtx = new VectorizedRowBatchCtx(); - vrbCtx.init(vOutContext.getScratchColumnTypeMap(), (StructObjectInspector) outputObjInspector); - outputBatch = vrbCtx.createVectorizedRowBatch(); - vectorColumnAssign = VectorColumnAssignFactory.buildAssigners( - outputBatch, outputObjInspector, vOutContext.getProjectionColumnMap(), conf.getOutputColumnNames()); + vrbCtx = new VectorizedRowBatchCtx(); + vrbCtx.init(vOutContext.getScratchColumnTypeMap(), (StructObjectInspector) outputObjInspector); + outputBatch = vrbCtx.createVectorizedRowBatch(); + vectorAssignRowSameBatch = new VectorAssignRowSameBatch(); + vectorAssignRowSameBatch.init((StructObjectInspector) outputObjInspector, vOutContext.getProjectedColumns()); + vectorAssignRowSameBatch.setOneBatch(outputBatch); } } catch (HiveException he) { @@ -902,12 +904,12 @@ private void writeSingleRow(VectorHashKeyWrapper kw, VectorAggregationBufferRow } else { // Output keys and aggregates into the output batch. 
for (int i = 0; i < outputKeyLength; ++i) { - vectorColumnAssign[fi++].assignObjectValue(keyWrappersBatch.getWritableKeyValue ( - kw, i, keyOutputWriters[i]), outputBatch.size); + vectorAssignRowSameBatch.assignRowColumn(outputBatch.size, fi++, + keyWrappersBatch.getWritableKeyValue (kw, i, keyOutputWriters[i])); } for (int i = 0; i < aggregators.length; ++i) { - vectorColumnAssign[fi++].assignObjectValue(aggregators[i].evaluateOutput( - agg.getAggregationBuffer(i)), outputBatch.size); + vectorAssignRowSameBatch.assignRowColumn(outputBatch.size, fi++, + aggregators[i].evaluateOutput(agg.getAggregationBuffer(i))); } ++outputBatch.size; if (outputBatch.size == VectorizedRowBatch.DEFAULT_SIZE) { @@ -927,8 +929,8 @@ private void writeGroupRow(VectorAggregationBufferRow agg, DataOutputBuffer buff throws HiveException { int fi = outputKeyLength; // Start after group keys. for (int i = 0; i < aggregators.length; ++i) { - vectorColumnAssign[fi++].assignObjectValue(aggregators[i].evaluateOutput( - agg.getAggregationBuffer(i)), outputBatch.size); + vectorAssignRowSameBatch.assignRowColumn(outputBatch.size, fi++, + aggregators[i].evaluateOutput(agg.getAggregationBuffer(i))); } ++outputBatch.size; if (outputBatch.size == VectorizedRowBatch.DEFAULT_SIZE) { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java index bbc8d60..534a906 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java @@ -74,7 +74,7 @@ private transient VectorizedRowBatch outputBatch; private transient VectorizedRowBatch scratchBatch; // holds restored (from disk) big table rows private transient VectorExpressionWriter[] valueWriters; - private transient Map outputVectorAssigners; + private transient Map outputVectorAssignRowMap; // These members are used as out-of-band params // for the 
inner-loop supper.processOp callbacks @@ -125,6 +125,7 @@ public VectorMapJoinOperator (VectorizationContext vContext, OperatorDesc conf) @Override public Collection> initializeOp(Configuration hconf) throws HiveException { + // Code borrowed from VectorReduceSinkOperator.initializeOp VectorExpressionWriterFactory.processVectorInspector( (StructObjectInspector) inputObjInspectors[0], @@ -202,7 +203,8 @@ protected Object _evaluate(Object row, int version) throws HiveException { // Filtering is handled in the input batch processing filterMaps[posBigTable] = null; - outputVectorAssigners = new HashMap(); + outputVectorAssignRowMap = new HashMap(); + return result; } @@ -212,15 +214,16 @@ protected Object _evaluate(Object row, int version) throws HiveException { @Override protected void internalForward(Object row, ObjectInspector outputOI) throws HiveException { Object[] values = (Object[]) row; - VectorColumnAssign[] vcas = outputVectorAssigners.get(outputOI); - if (null == vcas) { - vcas = VectorColumnAssignFactory.buildAssigners( - outputBatch, outputOI, vOutContext.getProjectionColumnMap(), conf.getOutputColumnNames()); - outputVectorAssigners.put(outputOI, vcas); - } - for (int i=0; i> initializeOp(Configuration hconf) throws HiveException { + // We need a input object inspector that is for the row we will extract out of the // vectorized row batch, not for example, an original inspector for an ORC table, etc. 
- VectorExpressionWriterFactory.processVectorInspector( - (StructObjectInspector) inputObjInspectors[0], - new VectorExpressionWriterFactory.SingleOIDClosure() { - @Override - public void assign(VectorExpressionWriter[] writers, - ObjectInspector objectInspector) { - rowWriters = writers; - inputObjInspectors[0] = objectInspector; - } - }); - singleRow = new Object[rowWriters.length]; - - return super.initializeOp(hconf); + inputObjInspectors[0] = + VectorizedBatchUtil.convertToStandardStructObjectInspector((StructObjectInspector) inputObjInspectors[0]); + + // Call ReduceSinkOperator with new input inspector. + Collection> result = super.initializeOp(hconf); + assert result.isEmpty(); + + firstBatch = true; + + return result; } @Override public void process(Object data, int tag) throws HiveException { - VectorizedRowBatch vrg = (VectorizedRowBatch) data; - for (int batchIndex = 0 ; batchIndex < vrg.size; ++batchIndex) { - Object row = getRowObject(vrg, batchIndex); - super.process(row, tag); - } - } + VectorizedRowBatch batch = (VectorizedRowBatch) data; + if (firstBatch) { + vectorExtractRowDynBatch = new VectorExtractRowDynBatch(); + vectorExtractRowDynBatch.init((StructObjectInspector) inputObjInspectors[0], vContext.getProjectedColumns()); - private Object[] getRowObject(VectorizedRowBatch vrg, int rowIndex) - throws HiveException { - int batchIndex = rowIndex; - if (vrg.selectedInUse) { - batchIndex = vrg.selected[rowIndex]; + singleRow = new Object[vectorExtractRowDynBatch.getCount()]; + + firstBatch = false; } - for (int i = 0; i < vrg.projectionSize; i++) { - ColumnVector vectorColumn = vrg.cols[vrg.projectedColumns[i]]; - if (vectorColumn != null) { - singleRow[i] = rowWriters[i].writeValue(vectorColumn, batchIndex); - } else { - // Some columns from tables are not used. 
- singleRow[i] = null; + + vectorExtractRowDynBatch.setBatchOnEntry(batch); + + // VectorizedBatchUtil.debugDisplayBatch( batch, "VectorReduceSinkOperator processOp "); + + if (batch.selectedInUse) { + int selected[] = batch.selected; + for (int logical = 0 ; logical < batch.size; logical++) { + int batchIndex = selected[logical]; + vectorExtractRowDynBatch.extractRow(batchIndex, singleRow); + super.process(singleRow, tag); + } + } else { + for (int batchIndex = 0 ; batchIndex < batch.size; batchIndex++) { + vectorExtractRowDynBatch.extractRow(batchIndex, singleRow); + super.process(singleRow, tag); } } - return singleRow; + + vectorExtractRowDynBatch.forgetBatchOnExit(); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java index 1fe5c4e..a2f8091 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java @@ -73,7 +73,7 @@ private transient VectorHashKeyWrapperBatch keyWrapperBatch; - private transient Map outputVectorAssigners; + private transient Map outputVectorAssignRowMap; private transient int batchIndex = -1; @@ -141,7 +141,7 @@ public VectorSMBMapJoinOperator(VectorizationContext vContext, OperatorDesc conf keyWrapperBatch = VectorHashKeyWrapperBatch.compileKeyWrapperBatch(keyExpressions); - outputVectorAssigners = new HashMap(); + outputVectorAssignRowMap = new HashMap(); // This key evaluator translates from the vectorized VectorHashKeyWrapper format // into the row-mode MapJoinKey @@ -270,15 +270,16 @@ public void closeOp(boolean aborted) throws HiveException { @Override protected void internalForward(Object row, ObjectInspector outputOI) throws HiveException { Object[] values = (Object[]) row; - VectorColumnAssign[] vcas = outputVectorAssigners.get(outputOI); - if (null == vcas) { - vcas = 
VectorColumnAssignFactory.buildAssigners( - outputBatch, outputOI, vOutContext.getProjectionColumnMap(), conf.getOutputColumnNames()); - outputVectorAssigners.put(outputOI, vcas); - } - for (int i = 0; i < values.length; ++i) { - vcas[i].assignObjectValue(values[i], outputBatch.size); + VectorAssignRowSameBatch va = outputVectorAssignRowMap.get(outputOI); + if (va == null) { + va = new VectorAssignRowSameBatch(); + va.init((StructObjectInspector) outputOI, vOutContext.getProjectedColumns()); + va.setOneBatch(outputBatch); + outputVectorAssignRowMap.put(outputOI, va); } + + va.assignRow(outputBatch.size, values); + ++outputBatch.size; if (outputBatch.size == VectorizedRowBatch.DEFAULT_SIZE) { flushOutput(); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSerializeRow.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSerializeRow.java index ceb18c6..e2934e3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSerializeRow.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSerializeRow.java @@ -582,6 +582,7 @@ private Writer createWriter(TypeInfo typeInfo, int columnIndex) throws HiveExcep } public void init(List typeNames, int[] columnMap) throws HiveException { + writers = new Writer[typeNames.size()]; for (int i = 0; i < typeNames.size(); i++) { String typeName = typeNames.get(i); @@ -593,6 +594,7 @@ public void init(List typeNames, int[] columnMap) throws HiveException { } public void init(List typeNames) throws HiveException { + writers = new Writer[typeNames.size()]; for (int i = 0; i < typeNames.size(); i++) { String typeName = typeNames.get(i); @@ -602,6 +604,17 @@ public void init(List typeNames) throws HiveException { } } + public void init(PrimitiveTypeInfo[] primitiveTypeInfos, List columnMap) + throws HiveException { + + writers = new Writer[primitiveTypeInfos.length]; + for (int i = 0; i < primitiveTypeInfos.length; i++) { + int columnIndex = columnMap.get(i); + Writer writer = 
createWriter(primitiveTypeInfos[i], columnIndex); + writers[i] = writer; + } + } + public int getCount() { return writers.length; } @@ -610,6 +623,10 @@ public void setOutput(Output output) { serializeWrite.set(output); } + public void setOutputAppend(Output output) { + serializeWrite.setAppend(output); + } + /* * Note that when serializing a row, the logical mapping using selected in use has already * been performed. batchIndex is the actual index of the row. diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSerializeRowNoNulls.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSerializeRowNoNulls.java index 4a9fdcd..1363004 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSerializeRowNoNulls.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSerializeRowNoNulls.java @@ -355,6 +355,7 @@ private Writer createWriter(TypeInfo typeInfo, int columnIndex) throws HiveExcep } public void init(List typeNames, int[] columnMap) throws HiveException { + writers = new Writer[typeNames.size()]; for (int i = 0; i < typeNames.size(); i++) { String typeName = typeNames.get(i); @@ -366,6 +367,7 @@ public void init(List typeNames, int[] columnMap) throws HiveException { } public void init(List typeNames) throws HiveException { + writers = new Writer[typeNames.size()]; for (int i = 0; i < typeNames.size(); i++) { String typeName = typeNames.get(i); @@ -375,6 +377,17 @@ public void init(List typeNames) throws HiveException { } } + public void init(PrimitiveTypeInfo[] primitiveTypeInfos, List columnMap) + throws HiveException { + + writers = new Writer[primitiveTypeInfos.length]; + for (int i = 0; i < primitiveTypeInfos.length; i++) { + int columnIndex = columnMap.get(i); + Writer writer = createWriter(primitiveTypeInfos[i], columnIndex); + writers[i] = writer; + } + } + public int getCount() { return writers.length; } @@ -383,6 +396,10 @@ public void setOutput(Output output) { serializeWrite.set(output); } + public void 
setOutputAppend(Output output) { + serializeWrite.setAppend(output); + } + /* * Note that when serializing a row, the logical mapping using selected in use has already * been performed. batchIndex is the actual index of the row. diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedBatchUtil.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedBatchUtil.java index e9cf6b6..dcea8ae 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedBatchUtil.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedBatchUtil.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.sql.Timestamp; import java.util.ArrayList; +import java.util.Arrays; import java.util.LinkedList; import java.util.List; @@ -581,6 +582,52 @@ public static StandardStructObjectInspector convertToStandardStructObjectInspect return result; } + public static PrimitiveTypeInfo[] primitiveTypeInfosFromTypeNames( + String[] typeNames) throws HiveException { + + PrimitiveTypeInfo[] result = new PrimitiveTypeInfo[typeNames.length]; + + for(int i = 0; i < typeNames.length; i++) { + TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeNames[i]); + result[i] = (PrimitiveTypeInfo) typeInfo; + } + return result; + } + + /** + * Make a new (scratch) batch, which is exactly "like" the batch provided, except that it's empty + * @param batch the batch to imitate + * @return the new batch + * @throws HiveException + */ + public static VectorizedRowBatch makeLike(VectorizedRowBatch batch) throws HiveException { + VectorizedRowBatch newBatch = new VectorizedRowBatch(batch.numCols); + for (int i = 0; i < batch.numCols; i++) { + ColumnVector colVector = batch.cols[i]; + if (colVector != null) { + ColumnVector newColVector; + if (colVector instanceof LongColumnVector) { + newColVector = new LongColumnVector(); + } else if (colVector instanceof DoubleColumnVector) { + newColVector = new DoubleColumnVector(); + } else if (colVector instanceof 
BytesColumnVector) { + newColVector = new BytesColumnVector(); + } else if (colVector instanceof DecimalColumnVector) { + DecimalColumnVector decColVector = (DecimalColumnVector) colVector; + newColVector = new DecimalColumnVector(decColVector.precision, decColVector.scale); + } else { + throw new HiveException("Column vector class " + colVector.getClass().getName() + + " is not supported!"); + } + newBatch.cols[i] = newColVector; + newBatch.cols[i].init(); + } + } + newBatch.projectedColumns = Arrays.copyOf(batch.projectedColumns, batch.projectedColumns.length); + newBatch.projectionSize = batch.projectionSize; + newBatch.reset(); + return newBatch; + } public static String displayBytes(byte[] bytes, int start, int length) { StringBuilder sb = new StringBuilder(); @@ -632,7 +679,7 @@ public static void debugDisplayOneRow(VectorizedRowBatch batch, int index, Strin } sb.append(" "); } - System.out.println(sb.toString()); + LOG.info(sb.toString()); } public static void debugDisplayBatch(VectorizedRowBatch batch, String prefix) throws HiveException { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java index 77c3652..68c4df7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java @@ -670,7 +670,7 @@ public static ColumnVector allocateColumnVector(String type, int defaultSize) { type.equalsIgnoreCase(serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME)) { return new LongColumnVector(defaultSize); } else { - throw new Error("Cannot allocate vector column for " + type); + throw new RuntimeException("Cannot allocate vector column for " + type); } } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IdentityExpression.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IdentityExpression.java index 56e0d39..402d0f8 100644 
--- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IdentityExpression.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IdentityExpression.java @@ -45,6 +45,15 @@ public void evaluate(VectorizedRowBatch batch) { } } + public static boolean isColumnOnly(VectorExpression ve) { + if (ve instanceof IdentityExpression) { + VectorExpression identityExpression = (IdentityExpression) ve; + return (identityExpression.childExpressions == null); + } else { + return false; + } + } + @Override public int getOutputColumn() { return colNum; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriterFactory.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriterFactory.java index b2798d2..bbf8862 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriterFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriterFactory.java @@ -453,8 +453,7 @@ public static VectorExpressionWriter genVectorExpressionWritable( case UNION: case MAP: case LIST: - throw new IllegalArgumentException("Unsupported complex type: " + - fieldObjInspector.getCategory()); + return genVectorExpressionWritableEmpty(); default: throw new IllegalArgumentException("Unknown type " + fieldObjInspector.getCategory()); @@ -1113,6 +1112,32 @@ public Object initValue(Object ignored) { }.init(fieldObjInspector); } + // For complex types like STRUCT, MAP, etc we do not support, we need a writer that + // does nothing. We assume the Vectorizer class has not validated the query to actually + // try and use the complex types. They do show up in inputObjInspector[0] and need to be + // ignored. 
+ private static VectorExpressionWriter genVectorExpressionWritableEmpty() { + return new VectorExpressionWriterBase() { + + @Override + public Object writeValue(ColumnVector column, int row) + throws HiveException { + return null; + } + + @Override + public Object setValue(Object row, ColumnVector column, int columnRow) + throws HiveException { + return null; + } + + @Override + public Object initValue(Object ost) throws HiveException { + return null; + } + }; + } + /** * Helper function to create an array of writers from a list of expression descriptors. */ diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java new file mode 100644 index 0000000..b434c83 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java @@ -0,0 +1,773 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Future; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.HashTableLoaderFactory; +import org.apache.hadoop.hive.ql.exec.HashTableLoader; +import org.apache.hadoop.hive.ql.exec.MapJoinOperator; +import org.apache.hadoop.hive.ql.exec.MapredContext; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainerSerDe; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor; +import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.VectorColumnMapping; +import org.apache.hadoop.hive.ql.exec.vector.VectorColumnOutputMapping; +import org.apache.hadoop.hive.ql.exec.vector.VectorColumnSourceMapping; +import org.apache.hadoop.hive.ql.exec.vector.VectorCopyRow; +import org.apache.hadoop.hive.ql.exec.vector.VectorDeserializeRow; +import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; +import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; +import org.apache.hadoop.hive.ql.exec.vector.VectorizationContextRegion; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedBatchUtil; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx; +import org.apache.hadoop.hive.ql.exec.vector.expressions.IdentityExpression; +import 
org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized.VectorMapJoinOptimizedCreateHashTable; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTable; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.VectorMapJoinFastHashTableLoader; +import org.apache.hadoop.hive.ql.log.PerfLogger; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.BaseWork; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.MapJoinDesc; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc; +import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableImplementationType; +import org.apache.hadoop.hive.ql.plan.api.OperatorType; +import org.apache.hadoop.hive.serde2.lazybinary.fast.LazyBinaryDeserializeRead; +import org.apache.hadoop.hive.serde2.objectinspector.StructField; +import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; + +/** + * This class is common operator class for native vectorized map join. + * + * It contain common initialization logic. + * + * It is used by both inner and outer joins. + */ +public abstract class VectorMapJoinCommonOperator extends MapJoinOperator implements VectorizationContextRegion { + private static final long serialVersionUID = 1L; + private static final Log LOG = LogFactory.getLog(VectorMapJoinCommonOperator.class.getName()); + + // Whether this operator is an outer join. + protected boolean isOuterJoin; + + // Position of the *single* native vector map join small table. + protected byte posSingleVectorMapJoinSmallTable; + + // The incoming vectorization context. It describes the input big table vectorized row batch. 
+ protected VectorizationContext vContext; + + // This is the vectorized row batch description of the output of the native vectorized map join + // operator. It is based on the incoming vectorization context. Its projection may include + // a mixture of input big table columns and new scratch columns. + protected VectorizationContext vOutContext; + + // The output column projection of the vectorized row batch. And, the type names of the output + // columns. + protected int[] outputProjection; + protected String[] outputTypeNames; + + // These are the vectorized batch expressions for filtering, key expressions, and value + // expressions. + protected VectorExpression[] bigTableFilterExpressions; + protected VectorExpression[] bigTableKeyExpressions; + protected VectorExpression[] bigTableValueExpressions; + + // This is map of which vectorized row batch columns are the big table key columns. Since + // we may have key expressions that produce new scratch columns, we need a mapping. + // And, we have their type names. + protected int[] bigTableKeyColumnMap; + protected ArrayList bigTableKeyTypeNames; + + // Similarly, this is map of which vectorized row batch columns are the big table value columns. + // Since we may have value expressions that produce new scratch columns, we need a mapping. + // And, we have their type names. + protected int[] bigTableValueColumnMap; + protected ArrayList bigTableValueTypeNames; + + // This is a mapping of which big table columns (input and key/value expressions) will be + // part of the big table portion of the join output result. + protected VectorColumnOutputMapping bigTableRetainedMapping; + + // This is a mapping of which keys will be copied from the big table (input and key expressions) + // to the small table result portion of the output for outer join. 
+ protected VectorColumnOutputMapping bigTableOuterKeyMapping; + + // This is a mapping of the values in the small table hash table that will be copied to the + // small table result portion of the output. That is, a mapping of the LazyBinary field order + // to output batch scratch columns for the small table portion. + protected VectorColumnSourceMapping smallTableMapping; + + // These are the columns in the big and small table that are ByteColumnVector columns. + // We create data buffers for these columns so we can copy strings into those columns by value. + protected int[] bigTableByteColumnVectorColumns; + protected int[] smallTableByteColumnVectorColumns; + + // The above members are initialized by the constructor and must not be + // transient. + //--------------------------------------------------------------------------- + + // For debug tracing: the name of the map or reduce task. + protected transient String taskName; + + // Whether we should only use the overflow batch for join output results. + protected transient boolean onlyUseOverflowBatch; + + // The threshold where we should use a repeating vectorized row batch optimization for + // generating join output results. + protected transient boolean useOverflowRepeatedThreshold; + protected transient int overflowRepeatedThreshold; + + // A helper object that efficiently copies the big table columns that are for the big table + // portion of the join output. + protected transient VectorCopyRow bigTableRetainedVectorCopy; + + // A helper object that efficiently copies the big table key columns (input or key expressions) + // that appear in the small table portion of the join output for outer joins. + protected transient VectorCopyRow bigTableVectorCopyOuterKeys; + + // This helper object deserializes LazyBinary format small table values into columns of a row + // in a vectorized row batch. 
+ protected transient VectorDeserializeRow smallTableVectorDeserializeRow; + + // This a 2nd batch with the same "column schema" as the big table batch that can be used to + // build join output results in. If we can create some join output results in the big table + // batch, we will for better efficiency (i.e. avoiding copying). Otherwise, we will use the + // overflow batch. + protected transient VectorizedRowBatch overflowBatch; + + // A scratch batch that will be used to play back big table rows that were spilled + // to disk for the Hybrid Grace hash partitioning. + protected transient VectorizedRowBatch spillReplayBatch; + + // Whether the native vectorized map join operator has performed its common setup. + protected transient boolean needCommonSetup; + + // Whether the native vectorized map join operator has performed its + // native vector map join hash table setup. + protected transient boolean needHashTableSetup; + + // The small table hash table for the native vectorized map join operator. + protected transient VectorMapJoinHashTable vectorMapJoinHashTable; + + public VectorMapJoinCommonOperator() { + super(); + } + + public VectorMapJoinCommonOperator(VectorizationContext vContext, OperatorDesc conf) + throws HiveException { + super(); + + MapJoinDesc desc = (MapJoinDesc) conf; + this.conf = desc; + + this.vContext = vContext; + + /* + * Create a new vectorization context to create a new projection, but keep + * same output column manager must be inherited to track the scratch the columns. + */ + vOutContext = new VectorizationContext(getName(), this.vContext); + + order = desc.getTagOrder(); + posBigTable = (byte) desc.getPosBigTable(); + posSingleVectorMapJoinSmallTable = (order[0] == posBigTable ? 
order[1] : order[0]); + isOuterJoin = !desc.getNoOuterJoin(); + + Map> filterExpressions = desc.getFilters(); + bigTableFilterExpressions = vContext.getVectorExpressions(filterExpressions.get(posBigTable), + VectorExpressionDescriptor.Mode.FILTER); + + List keyDesc = desc.getKeys().get(posBigTable); + bigTableKeyExpressions = vContext.getVectorExpressions(keyDesc); + + // Since a key expression can be a calculation and the key will go into a scratch column, + // we need the mapping and type information. + bigTableKeyColumnMap = new int[bigTableKeyExpressions.length]; + bigTableKeyTypeNames = new ArrayList(); + boolean onlyColumns = true; + for (int i = 0; i < bigTableKeyColumnMap.length; i++) { + VectorExpression ve = bigTableKeyExpressions[i]; + if (!IdentityExpression.isColumnOnly(ve)) { + onlyColumns = false; + } + bigTableKeyTypeNames.add(keyDesc.get(i).getTypeString()); + bigTableKeyColumnMap[i] = ve.getOutputColumn(); + } + if (onlyColumns) { + bigTableKeyExpressions = null; + } + + List bigTableExprs = desc.getExprs().get(posBigTable); + bigTableValueExpressions = vContext.getVectorExpressions(bigTableExprs); + + /* + * Similarly, we need a mapping since a value expression can be a calculation and the value + * will go into a scratch column. 
+ */ + bigTableValueColumnMap = new int[bigTableValueExpressions.length]; + bigTableValueTypeNames = new ArrayList(); + onlyColumns = true; + for (int i = 0; i < bigTableValueColumnMap.length; i++) { + VectorExpression ve = bigTableValueExpressions[i]; + if (!IdentityExpression.isColumnOnly(ve)) { + onlyColumns = false; + } + bigTableValueTypeNames.add(bigTableExprs.get(i).getTypeString()); + bigTableValueColumnMap[i] = ve.getOutputColumn(); + } + if (onlyColumns) { + bigTableValueExpressions = null; + } + + determineCommonInfo(isOuterJoin); + } + + protected void determineCommonInfo(boolean isOuter) { + + bigTableRetainedMapping = new VectorColumnOutputMapping(); + + bigTableOuterKeyMapping = new VectorColumnOutputMapping(); + + // The order of the fields in the LazyBinary small table value must be used, so + // we use the source ordering flavor for the mapping. + smallTableMapping = new VectorColumnSourceMapping(); + + // We use a mapping object here so we can build the projection in any order and + // get the ordered by 0 to n-1 output columns at the end. + // + // Also, to avoid copying a big table key into the small table result area for inner joins, + // we reference it with the projection so there can be duplicate output columns + // in the projection. + VectorColumnSourceMapping projectionMapping = new VectorColumnSourceMapping(); + + /* + * Gather up big and small table output result information from the MapJoinDesc. 
+ */ + List bigTableRetainList = conf.getRetainList().get(posBigTable); + int bigTableRetainSize = bigTableRetainList.size(); + + int[] smallTableIndices; + int smallTableIndicesSize; + List smallTableExprs = conf.getExprs().get(posSingleVectorMapJoinSmallTable); + if (conf.getValueIndices() != null && conf.getValueIndices().get(posSingleVectorMapJoinSmallTable) != null) { + smallTableIndices = conf.getValueIndices().get(posSingleVectorMapJoinSmallTable); + smallTableIndicesSize = smallTableIndices.length; + } else { + smallTableIndices = null; + smallTableIndicesSize = 0; + } + + List smallTableRetainList = conf.getRetainList().get(posSingleVectorMapJoinSmallTable); + int smallTableRetainSize = smallTableRetainList.size(); + + int smallTableResultSize = 0; + if (smallTableIndicesSize > 0) { + smallTableResultSize = smallTableIndicesSize; + } else if (smallTableRetainSize > 0) { + smallTableResultSize = smallTableRetainSize; + } + + /* + * Determine the big table retained mapping first so we can optimize out (with + * projection) copying inner join big table keys in the subsequent small table results section. + */ + int nextOutputColumn = (order[0] == posBigTable ? 0 : smallTableResultSize); + for (int i = 0; i < bigTableRetainSize; i++) { + + // Since bigTableValueExpressions may do a calculation and produce a scratch column, we + // need to map to the right batch column. + + int retainColumn = bigTableRetainList.get(i); + int batchColumnIndex = bigTableValueColumnMap[retainColumn]; + String typeName = bigTableValueTypeNames.get(i); + + // With this map we project the big table batch to make it look like an output batch. + projectionMapping.add(nextOutputColumn, batchColumnIndex, typeName); + + // Collect columns we copy from the big table batch to the overflow batch. + bigTableRetainedMapping.add(batchColumnIndex, batchColumnIndex, typeName); + + nextOutputColumn++; + } + + /* + * Now determine the small table results. 
+ */ + int firstSmallTableOutputColumn; + firstSmallTableOutputColumn = (order[0] == posBigTable ? bigTableRetainSize : 0); + int smallTableOutputCount = 0; + nextOutputColumn = firstSmallTableOutputColumn; + + // Small table indices has more information (i.e. keys) than retain, so use it if it exists... + if (smallTableIndicesSize > 0) { + smallTableOutputCount = smallTableIndicesSize; + + for (int i = 0; i < smallTableIndicesSize; i++) { + if (smallTableIndices[i] >= 0) { + + // Zero and above numbers indicate a big table key is needed for + // small table result "area". + + int keyIndex = smallTableIndices[i]; + + // Since bigTableKeyExpressions may do a calculation and produce a scratch column, we + // need to map the right column. + int batchKeyColumn = bigTableKeyColumnMap[keyIndex]; + String typeName = bigTableKeyTypeNames.get(keyIndex); + + if (!isOuter) { + + // Optimize inner join keys of small table results. + + // Project the big table key into the small table result "area". + projectionMapping.add(nextOutputColumn, batchKeyColumn, typeName); + + if (!bigTableRetainedMapping.containsOutputColumn(batchKeyColumn)) { + // If necessary, copy the big table key into the overflow batch's small table + // result "area". + bigTableRetainedMapping.add(batchKeyColumn, batchKeyColumn, typeName); + } + } else { + + // For outer joins, since the small table key can be null when there is no match, + // we must have a physical (scratch) column for those keys. We cannot use the + // projection optimization used by inner joins above. + + int scratchColumn = vOutContext.allocateScratchColumn(typeName); + projectionMapping.add(nextOutputColumn, scratchColumn, typeName); + + bigTableRetainedMapping.add(batchKeyColumn, scratchColumn, typeName); + + bigTableOuterKeyMapping.add(batchKeyColumn, scratchColumn, typeName); + } + } else { + + // Negative numbers indicate a column to be (deserialize) read from the small table's + // LazyBinary value row. 
+ int smallTableValueIndex = -smallTableIndices[i] - 1; + + String typeName = smallTableExprs.get(i).getTypeString(); + + // Make a new big table scratch column for the small table value. + int scratchColumn = vOutContext.allocateScratchColumn(typeName); + projectionMapping.add(nextOutputColumn, scratchColumn, typeName); + + smallTableMapping.add(smallTableValueIndex, scratchColumn, typeName); + } + nextOutputColumn++; + } + } else if (smallTableRetainSize > 0) { + smallTableOutputCount = smallTableRetainSize; + + // Only small table values appear in join output result. + + for (int i = 0; i < smallTableRetainSize; i++) { + int smallTableValueIndex = smallTableRetainList.get(i); + + // Make a new big table scratch column for the small table value. + String typeName = smallTableExprs.get(i).getTypeString(); + int scratchColumn = vOutContext.allocateScratchColumn(typeName); + + projectionMapping.add(nextOutputColumn, scratchColumn, typeName); + + smallTableMapping.add(smallTableValueIndex, scratchColumn, typeName); + nextOutputColumn++; + } + } + + // Convert dynamic arrays and maps to simple arrays. + + bigTableRetainedMapping.finalize(); + + bigTableOuterKeyMapping.finalize(); + + smallTableMapping.finalize(); + + // Which big table and small table columns are ByteColumnVector and need have their data buffer + // to be manually reset for some join result processing? + + bigTableByteColumnVectorColumns = getByteColumnVectorColumns(bigTableOuterKeyMapping); + + smallTableByteColumnVectorColumns = getByteColumnVectorColumns(smallTableMapping); + + projectionMapping.finalize(); + + // Verify we added an entry for each output. 
+ assert projectionMapping.isSourceSequenceGood(); + + outputProjection = projectionMapping.getOutputColumns(); + outputTypeNames = projectionMapping.getTypeNames(); + + if (LOG.isDebugEnabled()) { + int[] orderDisplayable = new int[order.length]; + for (int i = 0; i < order.length; i++) { + orderDisplayable[i] = (int) order[i]; + } + LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor order " + Arrays.toString(orderDisplayable)); + LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor posBigTable " + (int) posBigTable); + LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor posSingleVectorMapJoinSmallTable " + (int) posSingleVectorMapJoinSmallTable); + + LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor bigTableKeyColumnMap " + Arrays.toString(bigTableKeyColumnMap)); + LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor bigTableKeyTypeNames " + bigTableKeyTypeNames); + + LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor bigTableValueColumnMap " + Arrays.toString(bigTableValueColumnMap)); + LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor bigTableValueTypeNames " + bigTableValueTypeNames); + + LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor smallTableIndices " + Arrays.toString(smallTableIndices)); + LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor smallTableRetainList " + smallTableRetainList); + + LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor firstSmallTableOutputColumn " + firstSmallTableOutputColumn); + LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor smallTableOutputCount " + smallTableOutputCount); + + LOG.debug(taskName + ", " + getOperatorId() + " 
VectorMapJoinCommonOperator constructor bigTableRetainedMapping " + bigTableRetainedMapping.toString()); + + LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor bigTableOuterKeyMapping " + bigTableOuterKeyMapping.toString()); + + LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor smallTableMapping " + smallTableMapping.toString()); + + LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor bigTableByteColumnVectorColumns " + Arrays.toString(bigTableByteColumnVectorColumns)); + LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor smallTableByteColumnVectorColumns " + Arrays.toString(smallTableByteColumnVectorColumns)); + + LOG.info(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor outputProjection " + Arrays.toString(outputProjection)); + LOG.info(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor outputTypeNames " + Arrays.toString(outputTypeNames)); + } + + setupVOutContext(conf.getOutputColumnNames()); + } + + /** + * Determine from a mapping which columns are BytesColumnVector columns. + */ + private int[] getByteColumnVectorColumns(VectorColumnMapping mapping) { + // Search mapping for any strings and return their output columns. + ArrayList list = new ArrayList(); + int count = mapping.getCount(); + int[] outputColumns = mapping.getOutputColumns(); + String[] typeNames = mapping.getTypeNames(); + for (int i = 0; i < count; i++) { + int outputColumn = outputColumns[i]; + String typeName = typeNames[i]; + if (VectorizationContext.isStringFamily(typeName)) { + list.add(outputColumn); + } + } + return ArrayUtils.toPrimitive(list.toArray(new Integer[0])); + } + + /** + * Setup the vectorized row batch description of the output of the native vectorized map join + * operator. 
Use the output projection we previously built from a mixture of input big table + * columns and new scratch columns. + */ + protected void setupVOutContext(List outputColumnNames) { + if (LOG.isDebugEnabled()) { + LOG.info(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor outputColumnNames " + outputColumnNames); + } + if (outputColumnNames.size() != outputProjection.length) { + throw new RuntimeException("Output column names " + outputColumnNames + " length and output projection " + Arrays.toString(outputProjection) + " / " + Arrays.toString(outputTypeNames) + " length mismatch"); + } + vOutContext.resetProjectionColumns(); + for (int i = 0; i < outputColumnNames.size(); ++i) { + String columnName = outputColumnNames.get(i); + int outputColumn = outputProjection[i]; + vOutContext.addProjectionColumn(columnName, outputColumn); + + if (LOG.isDebugEnabled()) { + LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor addProjectionColumn " + i + " columnName " + columnName + " outputColumn " + outputColumn); + } + } + } + + /** + * This override lets us substitute our own fast vectorized hash table loader. + */ + @Override + protected HashTableLoader getHashTableLoader(Configuration hconf) { + + VectorMapJoinDesc vectorDesc = conf.getVectorDesc(); + HashTableImplementationType hashTableImplementationType = vectorDesc.hashTableImplementationType(); + HashTableLoader hashTableLoader; + switch (vectorDesc.hashTableImplementationType()) { + case OPTIMIZED: + // Use the Tez hash table loader. + hashTableLoader = HashTableLoaderFactory.getLoader(hconf); + break; + case FAST: + // Use our specialized hash table loader. 
+ hashTableLoader = new VectorMapJoinFastHashTableLoader(); + break; + default: + throw new RuntimeException("Unknown vector map join hash table implementation type " + hashTableImplementationType.name()); + } + return hashTableLoader; + } + + @Override + protected Collection> initializeOp(Configuration hconf) throws HiveException { + Collection> result = super.initializeOp(hconf); + + // Determine the name of our map or reduce task for debug tracing. + BaseWork work = Utilities.getMapWork(hconf); + if (work == null) { + work = Utilities.getReduceWork(hconf); + } + taskName = work.getName(); + + /* + * Get configuration parameters. + */ + onlyUseOverflowBatch = HiveConf.getBoolVar(hconf, + HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_OVERFLOW_BATCH_ONLY); + + overflowRepeatedThreshold = HiveConf.getIntVar(hconf, + HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_OVERFLOW_REPEATED_THRESHOLD); + useOverflowRepeatedThreshold = (overflowRepeatedThreshold >= 0); + + + /* + * Create our vectorized copy row and deserialize row helper objects. + */ + if (smallTableMapping.getCount() > 0) { + smallTableVectorDeserializeRow = new VectorDeserializeRow( + new LazyBinaryDeserializeRead( + VectorizedBatchUtil.primitiveTypeInfosFromTypeNames( + smallTableMapping.getTypeNames()))); + smallTableVectorDeserializeRow.init(smallTableMapping.getOutputColumns()); + } + + if (bigTableRetainedMapping.getCount() > 0) { + bigTableRetainedVectorCopy = new VectorCopyRow(); + bigTableRetainedVectorCopy.init(bigTableRetainedMapping); + } + + if (bigTableOuterKeyMapping.getCount() > 0) { + bigTableVectorCopyOuterKeys = new VectorCopyRow(); + bigTableVectorCopyOuterKeys.init(bigTableOuterKeyMapping); + } + + /* + * Setup the overflow batch. 
+ */ + overflowBatch = setupOverflowBatch(); + + needCommonSetup = true; + needHashTableSetup = true; + + if (LOG.isDebugEnabled()) { + int[] currentScratchColumns = vOutContext.currentScratchColumns(); + LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator initializeOp currentScratchColumns " + Arrays.toString(currentScratchColumns)); + + StructObjectInspector structOutputObjectInspector = (StructObjectInspector) outputObjInspector; + List fields = structOutputObjectInspector.getAllStructFieldRefs(); + int i = 0; + for (StructField field : fields) { + LOG.debug("VectorMapJoinInnerBigOnlyCommonOperator initializeOp " + i + " field " + field.getFieldName() + " type " + field.getFieldObjectInspector().getTypeName()); + i++; + } + } + + return result; + } + + @Override + protected Pair loadHashTable( + ExecMapperContext mapContext, MapredContext mrContext) throws HiveException { + + Pair pair; + + VectorMapJoinDesc vectorDesc = conf.getVectorDesc(); + HashTableImplementationType hashTableImplementationType = vectorDesc.hashTableImplementationType(); + HashTableLoader hashTableLoader; + switch (vectorDesc.hashTableImplementationType()) { + case OPTIMIZED: + { + // Using Tez's HashTableLoader, create either a MapJoinBytesTableContainer or + // HybridHashTableContainer. + pair = super.loadHashTable(mapContext, mrContext); + + // Create our vector map join optimized hash table variation *above* the + // map join table container. + MapJoinTableContainer[] mapJoinTableContainers = pair.getLeft(); + vectorMapJoinHashTable = VectorMapJoinOptimizedCreateHashTable.createHashTable(conf, + mapJoinTableContainers[posSingleVectorMapJoinSmallTable]); + } + break; + + case FAST: + { + // Use our VectorMapJoinFastHashTableLoader to create a VectorMapJoinTableContainer. + pair = super.loadHashTable(mapContext, mrContext); + + // Get our vector map join fast hash table variation from the + // vector map join table container. 
+ MapJoinTableContainer[] mapJoinTableContainers = pair.getLeft(); + VectorMapJoinTableContainer vectorMapJoinTableContainer = + (VectorMapJoinTableContainer) mapJoinTableContainers[posSingleVectorMapJoinSmallTable]; + vectorMapJoinHashTable = vectorMapJoinTableContainer.vectorMapJoinHashTable(); + } + break; + default: + throw new RuntimeException("Unknown vector map join hash table implementation type " + hashTableImplementationType.name()); + } + + return pair; + } + + /* + * Setup our 2nd batch with the same "column schema" as the big table batch that can be used to + * build join output results in. + */ + protected VectorizedRowBatch setupOverflowBatch() { + VectorizedRowBatch overflowBatch; + + Map scratchColumnTypeMap = vOutContext.getScratchColumnTypeMap(); + int maxColumn = 0; + for (int i = 0; i < outputProjection.length; i++) { + int outputColumn = outputProjection[i]; + if (maxColumn < outputColumn) { + maxColumn = outputColumn; + } + } + for (int outputColumn : scratchColumnTypeMap.keySet()) { + if (maxColumn < outputColumn) { + maxColumn = outputColumn; + } + } + overflowBatch = new VectorizedRowBatch(maxColumn + 1); + + // First, just allocate just the projection columns we will be using. + for (int i = 0; i < outputProjection.length; i++) { + int outputColumn = outputProjection[i]; + String typeName = outputTypeNames[i]; + allocateOverflowBatchColumnVector(overflowBatch, outputColumn, typeName); + } + + // Now, add any scratch columns needed for children operators. + for (int outputColumn : scratchColumnTypeMap.keySet()) { + String typeName = scratchColumnTypeMap.get(outputColumn); + allocateOverflowBatchColumnVector(overflowBatch, outputColumn, typeName); + } + + overflowBatch.projectedColumns = outputProjection; + overflowBatch.projectionSize = outputProjection.length; + + overflowBatch.reset(); + + return overflowBatch; + } + + /* + * Allocate overflow batch columns by hand. 
+ */ + private void allocateOverflowBatchColumnVector(VectorizedRowBatch overflowBatch, int outputColumn, + String typeName) { + + if (overflowBatch.cols[outputColumn] == null) { + String vectorTypeName; + if (VectorizationContext.isIntFamily(typeName) || + VectorizationContext.isDatetimeFamily(typeName)) { + vectorTypeName = "long"; + } else if (VectorizationContext.isFloatFamily(typeName)) { + vectorTypeName = "double"; + } else if (VectorizationContext.isStringFamily(typeName)) { + vectorTypeName = "string"; + } else if (VectorizationContext.decimalTypePattern.matcher(typeName).matches()){ + vectorTypeName = typeName; // Keep precision and scale. + } else { + throw new RuntimeException("Cannot determine vector type for " + typeName); + } + overflowBatch.cols[outputColumn] = VectorizedRowBatchCtx.allocateColumnVector(vectorTypeName, VectorizedRowBatch.DEFAULT_SIZE); + + if (LOG.isDebugEnabled()) { + LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator initializeOp overflowBatch outputColumn " + outputColumn + " class " + overflowBatch.cols[outputColumn].getClass().getSimpleName()); + } + } + } + + /* + * Common one time setup by native vectorized map join operator's processOp. + */ + protected void commonSetup(VectorizedRowBatch batch) throws HiveException { + LOG.info("VectorMapJoinInnerCommonOperator commonSetup begin..."); + + if (LOG.isDebugEnabled()) { + displayBatchColumns(batch, "batch"); + displayBatchColumns(overflowBatch, "overflowBatch"); + } + + // Make sure big table BytesColumnVectors have room for string values in the overflow batch... + for (int column: bigTableByteColumnVectorColumns) { + BytesColumnVector bytesColumnVector = (BytesColumnVector) overflowBatch.cols[column]; + bytesColumnVector.initBuffer(); + } + + // Make sure small table BytesColumnVectors have room for string values in the big table and + // overflow batchs... 
+ for (int column: smallTableByteColumnVectorColumns) { + BytesColumnVector bytesColumnVector = (BytesColumnVector) batch.cols[column]; + bytesColumnVector.initBuffer(); + bytesColumnVector = (BytesColumnVector) overflowBatch.cols[column]; + bytesColumnVector.initBuffer(); + } + + // Setup a scratch batch that will be used to play back big table rows that were spilled + // to disk for the Hybrid Grace hash partitioning. + spillReplayBatch = VectorizedBatchUtil.makeLike(batch); + + // TEMPORARY -- Set this up for Hybrid Grace logic in MapJoinOperator.closeOp + hashMapRowGetters = new ReusableGetAdaptor[mapJoinTables.length]; + smallTable = posSingleVectorMapJoinSmallTable; + } + + protected void displayBatchColumns(VectorizedRowBatch batch, String batchName) { + LOG.debug("commonSetup " + batchName + " column count " + batch.numCols); + for (int column = 0; column < batch.numCols; column++) { + LOG.debug("commonSetup " + batchName + " column " + column + " type " + (batch.cols[column] == null ? "NULL" : batch.cols[column].getClass().getSimpleName())); + } + } + + @Override + public OperatorType getType() { + return OperatorType.MAPJOIN; + } + + @Override + public VectorizationContext getOuputVectorizationContext() { + return vOutContext; + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java new file mode 100644 index 0000000..2164be4 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java @@ -0,0 +1,859 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.persistence.HybridHashTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.HybridHashTableContainer.HashPartition; +import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.VectorDeserializeRow; +import org.apache.hadoop.hive.ql.exec.vector.VectorSerializeRow; +import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedBatchUtil; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTableResult; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMapResult; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized.VectorMapJoinOptimizedCreateHashTable; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.serde2.SerDeException; +import 
org.apache.hadoop.hive.serde2.WriteBuffers.ByteSegmentRef; +import org.apache.hadoop.hive.serde2.lazybinary.fast.LazyBinaryDeserializeRead; +import org.apache.hadoop.hive.serde2.lazybinary.fast.LazyBinarySerializeWrite; +import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.apache.hadoop.hive.serde2.ByteStream.Output; + +/** + * This class has methods for generating vectorized join results and forwarding batches. + * + * In some cases we can forward the big table batch by setting scratch columns + * with small table results and then making use of our output projection to pick out all the + * output result columns. This can improve performance by avoiding copying big table values. + * So, we will use the big table batch's selected in use to represent those rows. + * + * At the same time, some output results need to be formed in the overflow batch. + * For example, to form N x M cross product output results. In this case, we will copy big + * table values into the overflow batch and set scratch columns in it for small table results. + * The "schema" of the overflow batch is the same as the big table batch so child operators + * only need one definition of their input batch. The overflow batch will typically be + * forwarded when it gets full, which might not be during a process call. + * + * NOTE: Child operators should not remember a received batch. 
+ */ + +public abstract class VectorMapJoinGenerateResultOperator extends VectorMapJoinCommonOperator { + + private static final long serialVersionUID = 1L; + private static final Log LOG = LogFactory.getLog(VectorMapJoinGenerateResultOperator.class.getName()); + private static final String CLASS_NAME = VectorMapJoinGenerateResultOperator.class.getName(); + + private transient PrimitiveTypeInfo[] bigTablePrimitiveTypeInfos; + + private transient VectorSerializeRow bigTableVectorSerializeRow; + + private transient VectorDeserializeRow bigTableVectorDeserializeRow; + + // Debug display. + protected transient long batchCounter; + + public VectorMapJoinGenerateResultOperator() { + super(); + } + + public VectorMapJoinGenerateResultOperator(VectorizationContext vContext, OperatorDesc conf) + throws HiveException { + super(vContext, conf); + } + + protected void commonSetup(VectorizedRowBatch batch) throws HiveException { + super.commonSetup(batch); + + batchCounter = 0; + + } + + //------------------------------------------------------------------------------------------------ + + protected void performValueExpressions(VectorizedRowBatch batch, + int[] allMatchs, int allMatchCount) { + /* + * For the moment, pretend all matched are selected so we can evaluate the value + * expressions. + * + * Since we may use the overflow batch when generating results, we will assign the + * selected and real batch size later... + */ + int[] saveSelected = batch.selected; + batch.selected = allMatchs; + boolean saveSelectedInUse = batch.selectedInUse; + batch.selectedInUse = true; + batch.size = allMatchCount; + + // Run our value expressions over whole batch. 
+ for(VectorExpression ve: bigTableValueExpressions) { + ve.evaluate(batch); + } + + batch.selected = saveSelected; + batch.selectedInUse = saveSelectedInUse; + } + + //------------------------------------------------------------------------------------------------ + + /* + * Common generate join results from hash maps used by Inner and Outer joins. + */ + + /** + * Generate join results for a single small table value match. + * + * @param batch + * The big table batch. + * @param hashMapResult + * The hash map results for the matching key. + * @param allMatchs + * The selection array for all matches key. + * @param allMatchesIndex + * Index into allMatches of the matching key we are generating for. + * @param duplicateCount + * Number of equal key rows. + * @param numSel + * Current number of rows that are remaining in the big table for forwarding. + * @return + * The new count of selected rows. + */ + protected int generateHashMapResultSingleValue(VectorizedRowBatch batch, + VectorMapJoinHashMapResult hashMapResult, int[] allMatchs, int allMatchesIndex, + int duplicateCount, int numSel) throws HiveException, IOException { + + // Read single value. + + ByteSegmentRef byteSegmentRef = hashMapResult.first(); + + if (!onlyUseOverflowBatch) { + + // Generate result within big table batch itself. + + for (int i = 0; i < duplicateCount; i++) { + + int batchIndex = allMatchs[allMatchesIndex + i]; + + if (bigTableVectorCopyOuterKeys != null) { + // Copy within row. 
+ bigTableVectorCopyOuterKeys.copyByReference(batch, batchIndex, batch, batchIndex); + } + + if (smallTableVectorDeserializeRow != null) { + + byte[] bytes = byteSegmentRef.getBytes(); + int offset = (int) byteSegmentRef.getOffset(); + int length = byteSegmentRef.getLength(); + smallTableVectorDeserializeRow.setBytes(bytes, offset, length); + + smallTableVectorDeserializeRow.deserializeByValue(batch, batchIndex); + } + + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, "generateHashMapResultSingleValue big table"); + + // Use the big table row as output. + batch.selected[numSel++] = batchIndex; + } + } else { + + // Generate result in overflow batch. + + for (int i = 0; i < duplicateCount; i++) { + + int batchIndex = allMatchs[allMatchesIndex + i]; + + // Copy the BigTable values into the overflow batch. Since the overflow batch may + // not get flushed here, we must copy by value. + if (bigTableRetainedVectorCopy != null) { + bigTableRetainedVectorCopy.copyByValue(batch, batchIndex, + overflowBatch, overflowBatch.size); + } + + // Reference the keys we just copied above. + if (bigTableVectorCopyOuterKeys != null) { + bigTableVectorCopyOuterKeys.copyByReference(overflowBatch, overflowBatch.size, + overflowBatch, overflowBatch.size); + } + + if (smallTableVectorDeserializeRow != null) { + + byte[] bytes = byteSegmentRef.getBytes(); + int offset = (int) byteSegmentRef.getOffset(); + int length = byteSegmentRef.getLength(); + smallTableVectorDeserializeRow.setBytes(bytes, offset, length); + + smallTableVectorDeserializeRow.deserializeByValue(overflowBatch, overflowBatch.size); + } + + // VectorizedBatchUtil.debugDisplayOneRow(overflowBatch, overflowBatch.size, "generateHashMapResultSingleValue overflow"); + + overflowBatch.size++; + if (overflowBatch.size == overflowBatch.DEFAULT_SIZE) { + forwardOverflow(); + } + } + } + return numSel; + } + + /** + * Generate results for a N x M cross product. + * + * @param batch + * The big table batch. 
+ * @param hashMapResult + * The hash map results for the matching key. + * @param allMatchs + * The all match selected array that contains (physical) batch indices. + * @param allMatchesIndex + * The index of the match key. + * @param duplicateCount + * Number of equal key rows. + */ + protected void generateHashMapResultMultiValue(VectorizedRowBatch batch, + VectorMapJoinHashMapResult hashMapResult, int[] allMatchs, int allMatchesIndex, + int duplicateCount) throws HiveException, IOException { + + if (useOverflowRepeatedThreshold && + hashMapResult.isCappedCountAvailable() && + hashMapResult.cappedCount() > overflowRepeatedThreshold) { + + // Large cross product: generate the vector optimization using repeating vectorized + // row batch optimization in the overflow batch. + + generateHashMapResultLargeMultiValue( + batch, hashMapResult, allMatchs, allMatchesIndex, duplicateCount); + return; + } + + // We do the cross product of the N big table equal key row's values against the + // small table matching key which has M value rows into overflow batch. + + for (int i = 0; i < duplicateCount; i++) { + + int batchIndex = allMatchs[allMatchesIndex + i]; + + ByteSegmentRef byteSegmentRef = hashMapResult.first(); + while (byteSegmentRef != null) { + + // Copy the BigTable values into the overflow batch. Since the overflow batch may + // not get flushed here, we must copy by value. + if (bigTableRetainedVectorCopy != null) { + bigTableRetainedVectorCopy.copyByValue(batch, batchIndex, + overflowBatch, overflowBatch.size); + } + + // Reference the keys we just copied above. 
+ if (bigTableVectorCopyOuterKeys != null) { + bigTableVectorCopyOuterKeys.copyByReference(overflowBatch, overflowBatch.size, + overflowBatch, overflowBatch.size); + } + + if (smallTableVectorDeserializeRow != null) { + + byte[] bytes = byteSegmentRef.getBytes(); + int offset = (int) byteSegmentRef.getOffset(); + int length = byteSegmentRef.getLength(); + smallTableVectorDeserializeRow.setBytes(bytes, offset, length); + + smallTableVectorDeserializeRow.deserializeByValue(overflowBatch, overflowBatch.size); + } + + // VectorizedBatchUtil.debugDisplayOneRow(overflowBatch, overflowBatch.size, "generateHashMapResultMultiValue overflow"); + + overflowBatch.size++; + if (overflowBatch.size == overflowBatch.DEFAULT_SIZE) { + forwardOverflow(); + } + byteSegmentRef = hashMapResult.next(); + } + } + } + + /** + * Generate optimized results for a large N x M cross product using repeated vectorized row + * batch optimization. + * + * @param batch + * The big table batch. + * @param hashMapResult + * The hash map results for the matching key. + * @param allMatchs + * The all match selected array that contains (physical) batch indices. + * @param allMatchesIndex + * The index of the match key. + * @param duplicateCount + * Number of equal key rows. + */ + private void generateHashMapResultLargeMultiValue(VectorizedRowBatch batch, + VectorMapJoinHashMapResult hashMapResult, int[] allMatchs, int allMatchesIndex, + int duplicateCount) throws HiveException, IOException { + + // Kick out previous overflow batch results. + if (overflowBatch.size > 0) { + forwardOverflow(); + } + + ByteSegmentRef byteSegmentRef = hashMapResult.first(); + while (true) { + + // Fill up as much of the overflow batch as possible with small table values. 
+ while (byteSegmentRef != null) { + + if (smallTableVectorDeserializeRow != null) { + + byte[] bytes = byteSegmentRef.getBytes(); + int offset = (int) byteSegmentRef.getOffset(); + int length = byteSegmentRef.getLength(); + smallTableVectorDeserializeRow.setBytes(bytes, offset, length); + + smallTableVectorDeserializeRow.deserializeByValue(overflowBatch, overflowBatch.size); + } + + overflowBatch.size++; + if (overflowBatch.size == overflowBatch.DEFAULT_SIZE) { + break; + } + byteSegmentRef = hashMapResult.next(); + } + + // Forward the overflow batch over and over: + // + // Reference a new one big table row's values each time + // cross product + // Current "batch" of small table values. + // + // TODO: This could be further optimized to copy big table (equal) keys once + // and only copy big table values each time... + // And, not set repeating every time... + // + + for (int i = 0; i < duplicateCount; i++) { + + int batchIndex = allMatchs[allMatchesIndex + i]; + + if (bigTableRetainedVectorCopy != null) { + // The one big table row's values repeat. + bigTableRetainedVectorCopy.copyByReference(batch, batchIndex, overflowBatch, 0); + for (int column : bigTableRetainedMapping.getOutputColumns()) { + overflowBatch.cols[column].isRepeating = true; + } + } + if (bigTableVectorCopyOuterKeys != null) { + bigTableVectorCopyOuterKeys.copyByReference(batch, batchIndex, overflowBatch, 0); + for (int column : bigTableOuterKeyMapping.getOutputColumns()) { + overflowBatch.cols[column].isRepeating = true; + } + } + + // Crucial here that we don't reset the overflow batch, or we will lose the small table + // values we put in above. + forwardOverflowNoReset(); + + // Hand reset the big table columns. 
+ for (int column : bigTableRetainedMapping.getOutputColumns()) { + ColumnVector colVector = overflowBatch.cols[column]; + colVector.reset(); + } + + if (bigTableVectorCopyOuterKeys != null) { + for (int column : bigTableOuterKeyMapping.getOutputColumns()) { + ColumnVector colVector = overflowBatch.cols[column]; + colVector.reset(); + } + } + } + + if (hashMapResult.isEof()) { + break; + } + byteSegmentRef = hashMapResult.next(); + + // Get ready for a another round of small table values. + overflowBatch.reset(); + } + // Clear away any residue from our optimizations. + overflowBatch.reset(); + } + + /** + * Generate optimized results when entire batch key is repeated and it matched the hash map. + * + * @param batch + * The big table batch. + * @param hashMapResult + * The hash map results for the repeated key. + * @return + * The new count of selected rows. + */ + protected int generateHashMapResultRepeatedAll(VectorizedRowBatch batch, + VectorMapJoinHashMapResult hashMapResult) throws IOException, HiveException { + + int[] selected = batch.selected; + + if (batch.selectedInUse) { + // The selected array is already filled in as we want it. + } else { + for (int i = 0; i < batch.size; i++) { + selected[i] = i; + } + batch.selectedInUse = true; + } + + int numSel = 0; + if (hashMapResult.isSingleRow()) { + numSel = generateHashMapResultSingleValue(batch, hashMapResult, + batch.selected, 0, batch.size, numSel); + + } else { + generateHashMapResultMultiValue(batch, hashMapResult, + batch.selected, 0, batch.size); + } + + return numSel; + } + + //----------------------------------------------------------------------------------------------- + + /* + * Spill. 
+ */ + + + private void setupSpillSerDe(VectorizedRowBatch batch) throws HiveException { + + PrimitiveTypeInfo[] inputObjInspectorsTypeInfos = + VectorizedBatchUtil.primitiveTypeInfosFromStructObjectInspector( + (StructObjectInspector) inputObjInspectors[posBigTable]); + + List projectedColumns = vContext.getProjectedColumns(); + int projectionSize = vContext.getProjectedColumns().size(); + + List typeInfoList = new ArrayList(); + List noNullsProjectionList = new ArrayList(); + for (int i = 0; i < projectionSize; i++) { + int projectedColumn = projectedColumns.get(i); + if (batch.cols[projectedColumn] != null) { + typeInfoList.add(inputObjInspectorsTypeInfos[i]); + noNullsProjectionList.add(projectedColumn); + } + } + + int[] noNullsProjection = ArrayUtils.toPrimitive(noNullsProjectionList.toArray(new Integer[0])); + int noNullsProjectionSize = noNullsProjection.length; + bigTablePrimitiveTypeInfos = typeInfoList.toArray(new PrimitiveTypeInfo[0]); + + bigTableVectorSerializeRow = + new VectorSerializeRow(new LazyBinarySerializeWrite(noNullsProjectionSize)); + + bigTableVectorSerializeRow.init( + bigTablePrimitiveTypeInfos, + noNullsProjectionList); + + bigTableVectorDeserializeRow = new VectorDeserializeRow( + new LazyBinaryDeserializeRead(bigTablePrimitiveTypeInfos)); + + bigTableVectorDeserializeRow.init(noNullsProjection); + } + + private void spillSerializeRow(VectorizedRowBatch batch, int batchIndex, + VectorMapJoinHashTableResult hashTableResult) throws IOException { + + int partitionId = hashTableResult.spillPartitionId(); + + HybridHashTableContainer ht = (HybridHashTableContainer) mapJoinTables[smallTable]; + HashPartition hp = ht.getHashPartitions()[partitionId]; + + VectorMapJoinRowBytesContainer rowBytesContainer = hp.getMatchfileRowBytesContainer(); + Output output = rowBytesContainer.getOuputForRowBytes(); +// int offset = output.getLength(); + bigTableVectorSerializeRow.setOutputAppend(output); + bigTableVectorSerializeRow.serializeWrite(batch, 
batchIndex); +// int length = output.getLength() - offset; + rowBytesContainer.finishRow(); + +// LOG.info("spillSerializeRow spilled batchIndex " + batchIndex + ", length " + length); + } + + protected void spillHashMapBatch(VectorizedRowBatch batch, + VectorMapJoinHashTableResult[] hashTableResults, + int[] spills, int[] spillHashTableResultIndices, int spillCount) + throws HiveException, IOException { + + if (bigTableVectorSerializeRow == null) { + setupSpillSerDe(batch); + } + + for (int i = 0; i < spillCount; i++) { + int batchIndex = spills[i]; + + int hashTableResultIndex = spillHashTableResultIndices[i]; + VectorMapJoinHashTableResult hashTableResult = hashTableResults[hashTableResultIndex]; + + spillSerializeRow(batch, batchIndex, hashTableResult); + } + } + + protected void spillBatchRepeated(VectorizedRowBatch batch, + VectorMapJoinHashTableResult hashTableResult) throws HiveException, IOException { + + if (bigTableVectorSerializeRow == null) { + setupSpillSerDe(batch); + } + + int[] selected = batch.selected; + boolean selectedInUse = batch.selectedInUse; + + for (int logical = 0; logical < batch.size; logical++) { + int batchIndex = (selectedInUse ? selected[logical] : logical); + spillSerializeRow(batch, batchIndex, hashTableResult); + } + } + + @Override + protected void reloadHashTable(HashPartition partition, + HybridHashTableContainer hybridHtContainer) + throws IOException, ClassNotFoundException, HiveException, SerDeException { + + // The super method will reload a hash table partition and + // put a single MapJoinBytesTableContainer into the currentSmallTable member. 
+ super.reloadHashTable(partition, hybridHtContainer); + + vectorMapJoinHashTable = VectorMapJoinOptimizedCreateHashTable.createHashTable(conf, + currentSmallTable); + needHashTableSetup = true; + + LOG.info(CLASS_NAME + " reloadHashTable!"); + } + + @Override + protected void reProcessBigTable(HybridHashTableContainer.HashPartition partition) + throws HiveException, IOException { + + LOG.info(CLASS_NAME + " reProcessBigTable enter..."); + + int rowCount = 0; + int batchCount = 0; + + try { + VectorMapJoinRowBytesContainer bigTable = partition.getMatchfileRowBytesContainer(); + bigTable.prepareForReading(); + + while (bigTable.readNext()) { + rowCount++; + + byte[] bytes = bigTable.currentBytes(); + int offset = bigTable.currentOffset(); + int length = bigTable.currentLength(); + +// LOG.info(CLASS_NAME + " reProcessBigTable serialized row #" + rowCount + ", offset " + offset + ", length " + length); + + bigTableVectorDeserializeRow.setBytes(bytes, offset, length); + bigTableVectorDeserializeRow.deserializeByValue(spillReplayBatch, spillReplayBatch.size); + spillReplayBatch.size++; + + if (spillReplayBatch.size == VectorizedRowBatch.DEFAULT_SIZE) { + LOG.info("reProcessBigTable going to call process with spillReplayBatch.size " + spillReplayBatch.size + " rows"); + process(spillReplayBatch, posBigTable); // call process once we have a full batch + spillReplayBatch.reset(); + batchCount++; + } + } + // Process the row batch that has less than DEFAULT_SIZE rows + if (spillReplayBatch.size > 0) { + LOG.info("reProcessBigTable going to call process with spillReplayBatch.size " + spillReplayBatch.size + " rows"); + process(spillReplayBatch, posBigTable); + spillReplayBatch.reset(); + batchCount++; + } + bigTable.clear(); + } catch (Exception e) { + LOG.info(CLASS_NAME + " reProcessBigTable exception! " + e); + throw new HiveException(e); + } + + LOG.info(CLASS_NAME + " reProcessBigTable exit! 
" + rowCount + " row processed and " + batchCount + " batches processed"); + } + + + //----------------------------------------------------------------------------------------------- + + /* + * Forwarding. + */ + + /** + * Forward the big table batch to the children. + * + * @param batch + * The big table batch. + */ + public void forwardBigTableBatch(VectorizedRowBatch batch) throws HiveException { + + // Save original projection. + int[] originalProjections = batch.projectedColumns; + int originalProjectionSize = batch.projectionSize; + + // Project with the output of our operator. + batch.projectionSize = outputProjection.length; + batch.projectedColumns = outputProjection; + + forward(batch, null); + + // Revert the projected columns back, because batch can be re-used by our parent operators. + batch.projectionSize = originalProjectionSize; + batch.projectedColumns = originalProjections; + } + + + /** + * Forward the overflow batch and reset the batch. + */ + protected void forwardOverflow() throws HiveException { + forward(overflowBatch, null); + overflowBatch.reset(); + } + + /** + * Forward the overflow batch, but do not reset the batch. + */ + private void forwardOverflowNoReset() throws HiveException { + forward(overflowBatch, null); + } + + /* + * Close. + */ + + /** + * On close, make sure a partially filled overflow batch gets forwarded. + */ + @Override + public void closeOp(boolean aborted) throws HiveException { + super.closeOp(aborted); + if (!aborted && overflowBatch.size > 0) { + forwardOverflow(); + } + LOG.info("VectorMapJoinInnerLongOperator closeOp " + batchCounter + " batches processed"); + } + + //----------------------------------------------------------------------------------------------- + + /* + * Debug. 
+ */ + + public static String intArrayToRangesString(int selection[], int size) { + if (size == 0) { + return "[]"; + } + + StringBuilder sb = new StringBuilder(); + + // Use ranges and duplicate multipliers to reduce the size of the display. + sb.append("["); + int firstIndex = 0; + int firstValue = selection[0]; + + boolean duplicates = false; + + int i = 1; + for ( ; i < size; i++) { + int newValue = selection[i]; + if (newValue == selection[i - 1]) { + + // Duplicate. + duplicates = true; + + if (newValue == firstValue) { + continue; + } else { + // Prior none, singleton, or range? + int priorRangeLength = i - 1 - firstIndex; + + if (priorRangeLength == 0) { + continue; + } + if (firstIndex > 0) { + sb.append(","); + } + sb.append(firstValue); + if (priorRangeLength > 1) { + sb.append(".." + selection[i - 2]); + } + firstIndex = i - 1; + firstValue = newValue; + continue; + } + } else { + if (duplicates) { + int numDuplicates = i - firstIndex; + if (firstIndex > 0) { + sb.append(","); + } + sb.append(numDuplicates + "*" + firstValue); + duplicates = false; + firstIndex = i; + firstValue = newValue; + continue; + } if (newValue == selection[i - 1] + 1) { + // Continue range.. + continue; + } else { + // Prior singleton or range? + int priorRangeLength = i - firstIndex; + if (firstIndex > 0) { + sb.append(","); + } + sb.append(firstValue); + if (priorRangeLength > 1) { + sb.append(".." + selection[i - 1]); + } + firstIndex = i; + firstValue = newValue; + continue; + } + } + } + if (duplicates) { + int numDuplicates = i - firstIndex; + if (firstIndex > 0) { + sb.append(","); + } + sb.append(numDuplicates + "*" + firstValue); + } else { + // Last singleton or range? + int priorRangeLength = i - firstIndex; + if (firstIndex > 0) { + sb.append(","); + } + sb.append(firstValue); + if (priorRangeLength > 1) { + sb.append(".." 
+ selection[i - 1]); + } + } + sb.append("]"); + return sb.toString(); + } + + public static String longArrayToRangesString(long selection[], int size) { + if (size == 0) { + return "[]"; + } + + StringBuilder sb = new StringBuilder(); + + // Use ranges and duplicate multipliers to reduce the size of the display. + sb.append("["); + int firstIndex = 0; + long firstValue = selection[0]; + + boolean duplicates = false; + + int i = 1; + for ( ; i < size; i++) { + long newValue = selection[i]; + if (newValue == selection[i - 1]) { + + // Duplicate. + duplicates = true; + + if (newValue == firstValue) { + continue; + } else { + // Prior none, singleton, or range? + int priorRangeLength = i - 1 - firstIndex; + + if (priorRangeLength == 0) { + continue; + } + if (firstIndex > 0) { + sb.append(","); + } + sb.append(firstValue); + if (priorRangeLength > 1) { + sb.append(".." + selection[i - 2]); + } + firstIndex = i - 1; + firstValue = newValue; + continue; + } + } else { + if (duplicates) { + int numDuplicates = i - firstIndex; + if (firstIndex > 0) { + sb.append(","); + } + sb.append(numDuplicates + "*" + firstValue); + duplicates = false; + firstIndex = i; + firstValue = newValue; + continue; + } if (newValue == selection[i - 1] + 1) { + // Continue range.. + continue; + } else { + // Prior singleton or range? + int priorRangeLength = i - firstIndex; + if (firstIndex > 0) { + sb.append(","); + } + sb.append(firstValue); + if (priorRangeLength > 1) { + sb.append(".." + selection[i - 1]); + } + firstIndex = i; + firstValue = newValue; + continue; + } + } + } + if (duplicates) { + int numDuplicates = i - firstIndex; + if (firstIndex > 0) { + sb.append(","); + } + sb.append(numDuplicates + "*" + firstValue); + } else { + // Last singleton or range? + int priorRangeLength = i - firstIndex; + if (firstIndex > 0) { + sb.append(","); + } + sb.append(firstValue); + if (priorRangeLength > 1) { + sb.append(".." 
+ selection[i - 1]); + } + } + sb.append("]"); + return sb.toString(); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyGenerateResultOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyGenerateResultOperator.java new file mode 100644 index 0000000..a4d24a8 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyGenerateResultOperator.java @@ -0,0 +1,367 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMultiSet; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTableResult; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMultiSetResult; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; + +/** + * This class has methods for generating vectorized join results for the big table only + * variation of inner joins. + * + * When an inner join does not have any small table columns in the join result, we use this + * variation we call inner big only. This variation uses a hash multi-set instead of hash map + * since there are no values (just a count). + * + * Note that if a inner key appears in the small table results area, we use the inner join + * projection optimization and are able to use this variation. + */ +public abstract class VectorMapJoinInnerBigOnlyGenerateResultOperator + extends VectorMapJoinGenerateResultOperator { + + private static final long serialVersionUID = 1L; + private static final Log LOG = LogFactory.getLog(VectorMapJoinInnerBigOnlyGenerateResultOperator.class.getName()); + + //--------------------------------------------------------------------------- + // Inner big-table only join specific members. + // + + // An array of hash multi-set results so we can do lookups on the whole batch before output result + // generation. 
+ protected transient VectorMapJoinHashMultiSetResult hashMultiSetResults[]; + + // Pre-allocated member for storing the (physical) batch index of matching row (single- or + // multi-small-table-valued) indexes during a process call. + protected transient int[] allMatchs; + + /* + * Pre-allocated members for storing information on single- and multi-valued-small-table matches. + * + * ~ValueCounts + * Number of (empty) small table values. + * ~AllMatchIndices + * (Logical) indices into allMatchs to the first row of a match of a + * possible series of duplicate keys. + * ~DuplicateCounts + * The duplicate count for each matched key. + * + */ + protected transient long[] equalKeySeriesValueCounts; + protected transient int[] equalKeySeriesAllMatchIndices; + protected transient int[] equalKeySeriesDuplicateCounts; + + + // Pre-allocated member for storing the (physical) batch index of rows that need to be spilled. + protected transient int[] spills; + + // Pre-allocated member for storing index into the hashMultiSetResults for each spilled row. + protected transient int[] spillHashMapResultIndices; + + public VectorMapJoinInnerBigOnlyGenerateResultOperator() { + super(); + } + + public VectorMapJoinInnerBigOnlyGenerateResultOperator(VectorizationContext vContext, OperatorDesc conf) + throws HiveException { + super(vContext, conf); + } + + /* + * Setup our inner big table only join specific members. + */ + protected void commonSetup(VectorizedRowBatch batch) throws HiveException { + super.commonSetup(batch); + + // Inner big-table only join specific. 
+ VectorMapJoinHashMultiSet baseHashMultiSet = (VectorMapJoinHashMultiSet) vectorMapJoinHashTable; + + hashMultiSetResults = new VectorMapJoinHashMultiSetResult[batch.DEFAULT_SIZE]; + for (int i = 0; i < hashMultiSetResults.length; i++) { + hashMultiSetResults[i] = baseHashMultiSet.createHashMultiSetResult(); + } + + allMatchs = new int[batch.DEFAULT_SIZE]; + + equalKeySeriesValueCounts = new long[batch.DEFAULT_SIZE]; + equalKeySeriesAllMatchIndices = new int[batch.DEFAULT_SIZE]; + equalKeySeriesDuplicateCounts = new int[batch.DEFAULT_SIZE]; + + spills = new int[batch.DEFAULT_SIZE]; + spillHashMapResultIndices = new int[batch.DEFAULT_SIZE]; + } + + //----------------------------------------------------------------------------------------------- + + /* + * Inner big table only join (hash multi-set). + */ + + /** + * Generate the inner big table only join output results for one vectorized row batch. + * + * @param batch + * The big table batch with any matching and any non matching rows both as + * selected in use. + * @param allMatchs + * A subset of the rows of the batch that are matches. + * @param allMatchCount + * Number of matches in allMatchs. + * @param equalKeySeriesValueCounts + * For each equal key series, whether the number of (empty) small table values. + * @param equalKeySeriesAllMatchIndices + * For each equal key series, the logical index into allMatchs. + * @param equalKeySeriesDuplicateCounts + * For each equal key series, the number of duplicates or equal keys. + * @param equalKeySeriesCount + * Number of single value matches. + * @param spills + * A subset of the rows of the batch that are spills. + * @param spillHashMapResultIndices + * For each entry in spills, the index into the hashMapResult. + * @param spillCount + * Number of spills in spills. + * @param hashTableResults + * The array of all hash table results for the batch. We need the + * VectorMapJoinHashTableResult for the spill information. 
+ * @param hashMapResultCount + * Number of entries in hashMapResults. + * + **/ + protected int finishInnerBigOnly(VectorizedRowBatch batch, + int[] allMatchs, int allMatchCount, + long[] equalKeySeriesValueCounts, int[] equalKeySeriesAllMatchIndices, + int[] equalKeySeriesDuplicateCounts, int equalKeySeriesCount, + int[] spills, int[] spillHashMapResultIndices, int spillCount, + VectorMapJoinHashTableResult[] hashTableResults, int hashMapResultCount) + throws HiveException, IOException { + + int numSel = 0; + + /* + * Optimize by running value expressions only over the matched rows. + */ + if (allMatchCount > 0 && bigTableValueExpressions != null) { + performValueExpressions(batch, allMatchs, allMatchCount); + } + + for (int i = 0; i < equalKeySeriesCount; i++) { + long count = equalKeySeriesValueCounts[i]; + int allMatchesIndex = equalKeySeriesAllMatchIndices[i]; + int duplicateCount = equalKeySeriesDuplicateCounts[i]; + + if (count == 1) { + numSel = generateHashMultiSetResultSingleValue( + batch, allMatchs, allMatchesIndex, duplicateCount, numSel); + } else { + generateHashMultiSetResultMultiValue(batch, + allMatchs, allMatchesIndex, + duplicateCount, count); + } + } + + if (spillCount > 0) { + spillHashMapBatch(batch, hashTableResults, + spills, spillHashMapResultIndices, spillCount); + } + + return numSel; + } + + /** + * Generate the single value match inner big table only join output results for a match. + * + * @param batch + * The big table batch. + * @param allMatchs + * A subset of the rows of the batch that are matches. + * @param allMatchesIndex + * The logical index into allMatchs of the first equal key. + * @param duplicateCount + * The number of duplicates or equal keys. + * @param numSel + * The current count of rows in the rebuilding of the selected array. + * + * @return + * The new count of selected rows. 
+ */ + private int generateHashMultiSetResultSingleValue(VectorizedRowBatch batch, + int[] allMatchs, int allMatchesIndex, int duplicateCount, int numSel) + throws HiveException, IOException { + + // LOG.info("generateHashMultiSetResultSingleValue enter..."); + + if (!onlyUseOverflowBatch) { + + // Generate result within big table batch itself. + + // LOG.info("generateHashMultiSetResultSingleValue with big table..."); + + for (int i = 0; i < duplicateCount; i++) { + + int batchIndex = allMatchs[allMatchesIndex + i]; + + // Use the big table row as output. + batch.selected[numSel++] = batchIndex; + } + } else { + + // Generate result in overflow batch. + + // LOG.info("generateHashMultiSetResultSingleValue in overflow batch.. (bigTableRetainedVectorCopy != null)" + (bigTableRetainedVectorCopy != null)); + + for (int i = 0; i < duplicateCount; i++) { + + int batchIndex = allMatchs[allMatchesIndex + i]; + + // Copy the BigTable values into the overflow batch. Since the overflow batch may + // not get flushed here, we must copy by value. + if (bigTableRetainedVectorCopy != null) { + bigTableRetainedVectorCopy.copyByValue(batch, batchIndex, + overflowBatch, overflowBatch.size); + } + + overflowBatch.size++; + if (overflowBatch.size == overflowBatch.DEFAULT_SIZE) { + forwardOverflow(); + } + } + } + return numSel; + } + + /** + * Generate results for a N x M cross product. + * + * @param batch + * The big table batch. + * @param allMatchs + * The all match selected array that contains (physical) batch indices. + * @param allMatchesIndex + * The index of the match key. + * @param duplicateCount + * Number of equal key rows. + * @param count + * Value count. 
+ */ + private void generateHashMultiSetResultMultiValue(VectorizedRowBatch batch, + int[] allMatchs, int allMatchesIndex, + int duplicateCount, long count) throws HiveException, IOException { + + // LOG.info("generateHashMultiSetResultMultiValue allMatchesIndex " + allMatchesIndex + " duplicateCount " + duplicateCount + " count " + count); + + // TODO: Look at repeating optimizations... + + for (int i = 0; i < duplicateCount; i++) { + + int batchIndex = allMatchs[allMatchesIndex + i]; + + for (long l = 0; l < count; l++) { + + // Copy the BigTable values into the overflow batch. Since the overflow batch may + // not get flushed here, we must copy by value. + if (bigTableRetainedVectorCopy != null) { + bigTableRetainedVectorCopy.copyByValue(batch, batchIndex, + overflowBatch, overflowBatch.size); + } + + overflowBatch.size++; + if (overflowBatch.size == overflowBatch.DEFAULT_SIZE) { + forwardOverflow(); + } + } + } + } + + /** + * Generate the inner big table only join output results for one vectorized row batch with + * a repeated key. + * + * @param batch + * The big table batch with any matching and any non matching rows both as + * selected in use. + * @param hashMultiSetResult + * The hash multi-set results for the batch. + */ + protected int generateHashMultiSetResultRepeatedAll(VectorizedRowBatch batch, + VectorMapJoinHashMultiSetResult hashMultiSetResult) throws HiveException { + + long count = hashMultiSetResult.count(); + + if (batch.selectedInUse) { + // The selected array is already filled in as we want it. + } else { + int[] selected = batch.selected; + for (int i = 0; i < batch.size; i++) { + selected[i] = i; + } + batch.selectedInUse = true; + } + + do { + forwardBigTableBatch(batch); + count--; + } while (count > 0); + + // We forwarded the batch in this method. 
+ return 0; + } + + protected int finishInnerBigOnlyRepeated(VectorizedRowBatch batch, JoinUtil.JoinResult joinResult, + VectorMapJoinHashMultiSetResult hashMultiSetResult) throws HiveException, IOException { + + int numSel = 0; + + switch (joinResult) { + case MATCH: + + if (bigTableValueExpressions != null) { + // Run our value expressions over whole batch. + for(VectorExpression ve: bigTableValueExpressions) { + ve.evaluate(batch); + } + } + + // Generate special repeated case. + numSel = generateHashMultiSetResultRepeatedAll(batch, hashMultiSetResult); + break; + + case SPILL: + // Whole batch is spilled. + spillBatchRepeated(batch, (VectorMapJoinHashTableResult) hashMultiSetResult); + break; + + case NOMATCH: + // No match for entire batch. + break; + } + + return numSel; + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java new file mode 100644 index 0000000..2173829 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java @@ -0,0 +1,378 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin; + +import java.io.IOException; +import java.util.Arrays; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTableResult; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; + +// Single-Column Long hash table import. +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinLongHashMultiSet; + +// Single-Column Long specific imports. +import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; + +/* + * Specialized class for doing a vectorized map join that is an inner join on a Single-Column Long + * and only big table columns appear in the join result so a hash multi-set is used. + */ +public class VectorMapJoinInnerBigOnlyLongOperator extends VectorMapJoinInnerBigOnlyGenerateResultOperator { + + private static final long serialVersionUID = 1L; + private static final Log LOG = LogFactory.getLog(VectorMapJoinInnerBigOnlyLongOperator.class.getName()); + private static final String CLASS_NAME = VectorMapJoinInnerBigOnlyLongOperator.class.getName(); + + // (none) + + // The above members are initialized by the constructor and must not be + // transient. + //--------------------------------------------------------------------------- + + // The hash map for this specialized class. 
+ private transient VectorMapJoinLongHashMultiSet hashMultiSet; + + //--------------------------------------------------------------------------- + // Single-Column Long specific members. + // + + // For integers, we have optional min/max filtering. + private transient boolean useMinMax; + private transient long min; + private transient long max; + + // The column number for this one column join specialization. + private transient int singleJoinColumn; + + //--------------------------------------------------------------------------- + // Pass-thru constructors. + // + + public VectorMapJoinInnerBigOnlyLongOperator() { + super(); + } + + public VectorMapJoinInnerBigOnlyLongOperator(VectorizationContext vContext, OperatorDesc conf) throws HiveException { + super(vContext, conf); + } + + //--------------------------------------------------------------------------- + // Process Single-Column Long Inner Big-Only Join on a vectorized row batch. + // + + @Override + public void process(Object row, int tag) throws HiveException { + + try { + VectorizedRowBatch batch = (VectorizedRowBatch) row; + + alias = (byte) tag; + + if (needCommonSetup) { + // Our one time process method initialization. + commonSetup(batch); + + /* + * Initialize Single-Column Long members for this specialized class. + */ + + singleJoinColumn = bigTableKeyColumnMap[0]; + + needCommonSetup = false; + } + + if (needHashTableSetup) { + // Setup our hash table specialization. It will be the first time the process + // method is called, or after a Hybrid Grace reload. + + /* + * Get our Single-Column Long hash multi-set information for this specialized class. + */ + + hashMultiSet = (VectorMapJoinLongHashMultiSet) vectorMapJoinHashTable; + useMinMax = hashMultiSet.useMinMax(); + if (useMinMax) { + min = hashMultiSet.min(); + max = hashMultiSet.max(); + } + + needHashTableSetup = false; + } + + batchCounter++; + + // For inner joins, we may apply the filter(s) now. 
+ for(VectorExpression ve : bigTableFilterExpressions) { + ve.evaluate(batch); + } + + final int inputLogicalSize = batch.size; + + if (inputLogicalSize == 0) { + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty"); + } + return; + } + + // Perform any key expressions. Results will go into scratch columns. + if (bigTableKeyExpressions != null) { + for (VectorExpression ve : bigTableKeyExpressions) { + ve.evaluate(batch); + } + } + + // We rebuild in-place the selected array with rows destine to be forwarded. + int numSel = 0; + + /* + * Single-Column Long specific declarations. + */ + + // The one join column for this specialized class. + LongColumnVector joinColVector = (LongColumnVector) batch.cols[singleJoinColumn]; + long[] vector = joinColVector.vector; + + /* + * Single-Column Long check for repeating. + */ + + // Check single column for repeating. + boolean allKeyInputColumnsRepeating = joinColVector.isRepeating; + + if (allKeyInputColumnsRepeating) { + + /* + * Repeating. + */ + + // All key input columns are repeating. Generate key once. Lookup once. + // Since the key is repeated, we must use entry 0 regardless of selectedInUse. + + /* + * Single-Column Long specific repeated lookup. + */ + + long key = vector[0]; + JoinUtil.JoinResult joinResult; + if (useMinMax && (key < min || key > max)) { + // Out of range for whole batch. + joinResult = JoinUtil.JoinResult.NOMATCH; + } else { + joinResult = hashMultiSet.contains(key, hashMultiSetResults[0]); + } + + /* + * Common repeated join result processing. + */ + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); + } + numSel = finishInnerBigOnlyRepeated(batch, joinResult, hashMultiSetResults[0]); + } else { + + /* + * NOT Repeating. 
+ */ + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); + } + + // We remember any matching rows in matchs / matchSize. At the end of the loop, + // selected / batch.size will represent both matching and non-matching rows for outer join. + // Only deferred rows will have been removed from selected. + int selected[] = batch.selected; + boolean selectedInUse = batch.selectedInUse; + + int hashMultiSetResultCount = 0; + int allMatchCount = 0; + int equalKeySeriesCount = 0; + int spillCount = 0; + + /* + * Single-Column Long specific variables. + */ + + long saveKey = 0; + + // We optimize performance by only looking up the first key in a series of equal keys. + boolean haveSaveKey = false; + JoinUtil.JoinResult saveJoinResult = JoinUtil.JoinResult.NOMATCH; + + // Logical loop over the rows in the batch since the batch may have selected in use. + for (int logical = 0; logical < inputLogicalSize; logical++) { + int batchIndex = (selectedInUse ? selected[logical] : logical); + + /* + * Single-Column Long get key. + */ + + long currentKey = vector[batchIndex]; + + /* + * Equal key series checking. + */ + + if (!haveSaveKey || currentKey != saveKey) { + + // New key. + + if (haveSaveKey) { + // Move on with our counts. + switch (saveJoinResult) { + case MATCH: + // We have extracted the count from the hash multi-set result, so we don't keep it. + equalKeySeriesCount++; + break; + case SPILL: + // We keep the hash multi-set result for its spill information. + hashMultiSetResultCount++; + break; + case NOMATCH: + break; + } + } + + // Regardless of our matching result, we keep that information to make multiple use + // of it for a possible series of equal keys. + haveSaveKey = true; + + /* + * Single-Column Long specific save key. + */ + + saveKey = currentKey; + + /* + * Single-Column Long specific lookup key. + */ + + if (useMinMax && (currentKey < min || currentKey > max)) { + // Key out of range for whole hash table. 
+ saveJoinResult = JoinUtil.JoinResult.NOMATCH; + } else { + saveJoinResult = hashMultiSet.contains(currentKey, hashMultiSetResults[hashMultiSetResultCount]); + } + + /* + * Common inner big-only join result processing. + */ + + switch (saveJoinResult) { + case MATCH: + equalKeySeriesValueCounts[equalKeySeriesCount] = hashMultiSetResults[hashMultiSetResultCount].count(); + equalKeySeriesAllMatchIndices[equalKeySeriesCount] = allMatchCount; + equalKeySeriesDuplicateCounts[equalKeySeriesCount] = 1; + allMatchs[allMatchCount++] = batchIndex; + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " MATCH isSingleValue " + equalKeySeriesIsSingleValue[equalKeySeriesCount] + " currentKey " + currentKey); + break; + + case SPILL: + spills[spillCount] = batchIndex; + spillHashMapResultIndices[spillCount] = hashMultiSetResultCount; + spillCount++; + break; + + case NOMATCH: + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH" + " currentKey " + currentKey); + break; + } + } else { + // Series of equal keys. + + switch (saveJoinResult) { + case MATCH: + equalKeySeriesDuplicateCounts[equalKeySeriesCount]++; + allMatchs[allMatchCount++] = batchIndex; + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " MATCH duplicate"); + break; + + case SPILL: + spills[spillCount] = batchIndex; + spillHashMapResultIndices[spillCount] = hashMultiSetResultCount; + spillCount++; + break; + + case NOMATCH: + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH duplicate"); + break; + } + } + } + + if (haveSaveKey) { + // Update our counts for the last key. + switch (saveJoinResult) { + case MATCH: + // We have extracted the count from the hash multi-set result, so we don't keep it. + equalKeySeriesCount++; + break; + case SPILL: + // We keep the hash multi-set result for its spill information. 
+ hashMultiSetResultCount++; + break; + case NOMATCH: + break; + } + } + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + + " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) + + " equalKeySeriesValueCounts " + longArrayToRangesString(equalKeySeriesValueCounts, equalKeySeriesCount) + + " equalKeySeriesAllMatchIndices " + intArrayToRangesString(equalKeySeriesAllMatchIndices, equalKeySeriesCount) + + " equalKeySeriesDuplicateCounts " + intArrayToRangesString(equalKeySeriesDuplicateCounts, equalKeySeriesCount) + + " spills " + intArrayToRangesString(spills, spillCount) + + " spillHashMapResultIndices " + intArrayToRangesString(spillHashMapResultIndices, spillCount) + + " hashMapResults " + Arrays.toString(Arrays.copyOfRange(hashMultiSetResults, 0, hashMultiSetResultCount))); + } + + numSel = finishInnerBigOnly(batch, + allMatchs, allMatchCount, + equalKeySeriesValueCounts, equalKeySeriesAllMatchIndices, + equalKeySeriesDuplicateCounts, equalKeySeriesCount, + spills, spillHashMapResultIndices, spillCount, + (VectorMapJoinHashTableResult[]) hashMultiSetResults, hashMultiSetResultCount); + } + + batch.selectedInUse = true; + batch.size = numSel; + + if (batch.size > 0) { + // Forward any remaining selected rows. + forwardBigTableBatch(batch); + } + + } catch (IOException e) { + throw new HiveException(e); + } catch (Exception e) { + throw new HiveException(e); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java new file mode 100644 index 0000000..ab6c17e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java @@ -0,0 +1,391 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin; + +import java.io.IOException; +import java.util.Arrays; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTableResult; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; + +// Multi-Key hash table import. +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashMultiSet; + +// Multi-Key specific imports. +import org.apache.hadoop.hive.ql.exec.vector.VectorSerializeRowNoNulls; +import org.apache.hadoop.hive.serde2.ByteStream.Output; +import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableSerializeWrite; + +/* + * Specialized class for doing a vectorized map join that is an inner join on Multi-Key + * and only big table columns appear in the join result so a hash multi-set is used. 
+ */ + +public class VectorMapJoinInnerBigOnlyMultiKeyOperator extends VectorMapJoinInnerBigOnlyGenerateResultOperator { + + private static final long serialVersionUID = 1L; + private static final Log LOG = LogFactory.getLog(VectorMapJoinInnerBigOnlyMultiKeyOperator.class.getName()); + private static final String CLASS_NAME = VectorMapJoinInnerBigOnlyMultiKeyOperator.class.getName(); + + // (none) + + // The above members are initialized by the constructor and must not be + // transient. + //--------------------------------------------------------------------------- + + // The hash map for this specialized class. + private transient VectorMapJoinBytesHashMultiSet hashMultiSet; + + //--------------------------------------------------------------------------- + // Multi-Key specific members. + // + + // Object that can take a set of columns in row in a vectorized row batch and serialized it. + // Known to not have any nulls. + private transient VectorSerializeRowNoNulls keyVectorSerializeWriteNoNulls; + + // The BinarySortable serialization of the current key. + private transient Output currentKeyOutput; + + // The BinarySortable serialization of the saved key for a possible series of equal keys. + private transient Output saveKeyOutput; + + //--------------------------------------------------------------------------- + // Pass-thru constructors. + // + + public VectorMapJoinInnerBigOnlyMultiKeyOperator() { + super(); + } + + public VectorMapJoinInnerBigOnlyMultiKeyOperator(VectorizationContext vContext, OperatorDesc conf) throws HiveException { + super(vContext, conf); + } + + //--------------------------------------------------------------------------- + // Process Multi-Key Inner Big-Only Join on a vectorized row batch. 
+ // + + @Override + public void process(Object row, int tag) throws HiveException { + + try { + VectorizedRowBatch batch = (VectorizedRowBatch) row; + + alias = (byte) tag; + + if (needCommonSetup) { + // Our one time process method initialization. + commonSetup(batch); + + /* + * Initialize Multi-Key members for this specialized class. + */ + + keyVectorSerializeWriteNoNulls = new VectorSerializeRowNoNulls( + new BinarySortableSerializeWrite(bigTableKeyColumnMap.length)); + keyVectorSerializeWriteNoNulls.init(bigTableKeyTypeNames, bigTableKeyColumnMap); + + currentKeyOutput = new Output(); + saveKeyOutput = new Output(); + + needCommonSetup = false; + } + + if (needHashTableSetup) { + // Setup our hash table specialization. It will be the first time the process + // method is called, or after a Hybrid Grace reload. + + /* + * Get our Multi-Key hash multi-set information for this specialized class. + */ + + hashMultiSet = (VectorMapJoinBytesHashMultiSet) vectorMapJoinHashTable; + + needHashTableSetup = false; + } + + batchCounter++; + + // For inner joins, we may apply the filter(s) now. + for(VectorExpression ve : bigTableFilterExpressions) { + ve.evaluate(batch); + } + + final int inputLogicalSize = batch.size; + + if (inputLogicalSize == 0) { + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty"); + } + return; + } + + // Perform any key expressions. Results will go into scratch columns. + if (bigTableKeyExpressions != null) { + for (VectorExpression ve : bigTableKeyExpressions) { + ve.evaluate(batch); + } + } + + // We rebuild in-place the selected array with rows destine to be forwarded. + int numSel = 0; + + /* + * Multi-Key specific declarations. + */ + + // None. + + /* + * Multi-Key check for repeating. + */ + + // If all BigTable input columns to key expressions are isRepeating, then + // calculate key once; lookup once. 
+ boolean allKeyInputColumnsRepeating; + if (bigTableKeyColumnMap.length == 0) { + allKeyInputColumnsRepeating = false; + } else { + allKeyInputColumnsRepeating = true; + for (int i = 0; i < bigTableKeyColumnMap.length; i++) { + if (!batch.cols[bigTableKeyColumnMap[i]].isRepeating) { + allKeyInputColumnsRepeating = false; + break; + } + } + } + + if (allKeyInputColumnsRepeating) { + + /* + * Repeating. + */ + + // All key input columns are repeating. Generate key once. Lookup once. + // Since the key is repeated, we must use entry 0 regardless of selectedInUse. + + /* + * Multi-Key specific repeated lookup. + */ + + keyVectorSerializeWriteNoNulls.setOutput(currentKeyOutput); + keyVectorSerializeWriteNoNulls.serializeWriteNoNulls(batch, 0); + byte[] keyBytes = currentKeyOutput.getData(); + int keyLength = currentKeyOutput.getLength(); + JoinUtil.JoinResult joinResult = hashMultiSet.contains(keyBytes, 0, keyLength, hashMultiSetResults[0]); + + /* + * Common repeated join result processing. + */ + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); + } + numSel = finishInnerBigOnlyRepeated(batch, joinResult, hashMultiSetResults[0]); + } else { + + /* + * NOT Repeating. + */ + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); + } + + // We remember any matching rows in matchs / matchSize. At the end of the loop, + // selected / batch.size will represent both matching and non-matching rows for outer join. + // Only deferred rows will have been removed from selected. + int selected[] = batch.selected; + boolean selectedInUse = batch.selectedInUse; + + int hashMultiSetResultCount = 0; + int allMatchCount = 0; + int equalKeySeriesCount = 0; + int spillCount = 0; + + /* + * Multi-Key specific variables. + */ + + Output temp; + + // We optimize performance by only looking up the first key in a series of equal keys. 
+ boolean haveSaveKey = false; + JoinUtil.JoinResult saveJoinResult = JoinUtil.JoinResult.NOMATCH; + + // Logical loop over the rows in the batch since the batch may have selected in use. + for (int logical = 0; logical < inputLogicalSize; logical++) { + int batchIndex = (selectedInUse ? selected[logical] : logical); + + /* + * Multi-Key get key. + */ + + // Generate binary sortable key for current row in vectorized row batch. + keyVectorSerializeWriteNoNulls.setOutput(currentKeyOutput); + keyVectorSerializeWriteNoNulls.serializeWriteNoNulls(batch, batchIndex); + + /* + * Equal key series checking. + */ + + if (!haveSaveKey || !saveKeyOutput.arraysEquals(currentKeyOutput)) { + + // New key. + + if (haveSaveKey) { + // Move on with our counts. + switch (saveJoinResult) { + case MATCH: + // We have extracted the count from the hash multi-set result, so we don't keep it. + equalKeySeriesCount++; + break; + case SPILL: + // We keep the hash multi-set result for its spill information. + hashMultiSetResultCount++; + break; + case NOMATCH: + break; + } + } + + // Regardless of our matching result, we keep that information to make multiple use + // of it for a possible series of equal keys. + haveSaveKey = true; + + /* + * Multi-Key specific save key. + */ + + temp = saveKeyOutput; + saveKeyOutput = currentKeyOutput; + currentKeyOutput = temp; + + /* + * Multi-Key specific lookup key. + */ + + byte[] keyBytes = saveKeyOutput.getData(); + int keyLength = saveKeyOutput.getLength(); + saveJoinResult = hashMultiSet.contains(keyBytes, 0, keyLength, hashMultiSetResults[hashMultiSetResultCount]); + + /* + * Common inner big-only join result processing. 
+ */ + + switch (saveJoinResult) { + case MATCH: + equalKeySeriesValueCounts[equalKeySeriesCount] = hashMultiSetResults[hashMultiSetResultCount].count(); + equalKeySeriesAllMatchIndices[equalKeySeriesCount] = allMatchCount; + equalKeySeriesDuplicateCounts[equalKeySeriesCount] = 1; + allMatchs[allMatchCount++] = batchIndex; + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " MATCH isSingleValue " + equalKeySeriesIsSingleValue[equalKeySeriesCount] + " currentKey " + currentKey); + break; + + case SPILL: + spills[spillCount] = batchIndex; + spillHashMapResultIndices[spillCount] = hashMultiSetResultCount; + spillCount++; + break; + + case NOMATCH: + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH" + " currentKey " + currentKey); + break; + } + } else { + // Series of equal keys. + + switch (saveJoinResult) { + case MATCH: + equalKeySeriesDuplicateCounts[equalKeySeriesCount]++; + allMatchs[allMatchCount++] = batchIndex; + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " MATCH duplicate"); + break; + + case SPILL: + spills[spillCount] = batchIndex; + spillHashMapResultIndices[spillCount] = hashMultiSetResultCount; + spillCount++; + break; + + case NOMATCH: + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH duplicate"); + break; + } + } + } + + if (haveSaveKey) { + // Update our counts for the last key. + switch (saveJoinResult) { + case MATCH: + // We have extracted the count from the hash multi-set result, so we don't keep it. + equalKeySeriesCount++; + break; + case SPILL: + // We keep the hash multi-set result for its spill information. 
+ hashMultiSetResultCount++; + break; + case NOMATCH: + break; + } + } + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + + " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) + + " equalKeySeriesValueCounts " + longArrayToRangesString(equalKeySeriesValueCounts, equalKeySeriesCount) + + " equalKeySeriesAllMatchIndices " + intArrayToRangesString(equalKeySeriesAllMatchIndices, equalKeySeriesCount) + + " equalKeySeriesDuplicateCounts " + intArrayToRangesString(equalKeySeriesDuplicateCounts, equalKeySeriesCount) + + " spills " + intArrayToRangesString(spills, spillCount) + + " spillHashMapResultIndices " + intArrayToRangesString(spillHashMapResultIndices, spillCount) + + " hashMapResults " + Arrays.toString(Arrays.copyOfRange(hashMultiSetResults, 0, hashMultiSetResultCount))); + } + + numSel = finishInnerBigOnly(batch, + allMatchs, allMatchCount, + equalKeySeriesValueCounts, equalKeySeriesAllMatchIndices, + equalKeySeriesDuplicateCounts, equalKeySeriesCount, + spills, spillHashMapResultIndices, spillCount, + (VectorMapJoinHashTableResult[]) hashMultiSetResults, hashMultiSetResultCount); + } + + batch.selectedInUse = true; + batch.size = numSel; + + if (batch.size > 0) { + // Forward any remaining selected rows. + forwardBigTableBatch(batch); + } + + } catch (IOException e) { + throw new HiveException(e); + } catch (Exception e) { + throw new HiveException(e); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java new file mode 100644 index 0000000..0b725aa --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java @@ -0,0 +1,367 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin; + +import java.io.IOException; +import java.util.Arrays; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTableResult; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; + +// Single-Column String hash table import. +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashMultiSet; + +// Single-Column String specific imports. +import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr; + +/* + * Specialized class for doing a vectorized map join that is an inner join on a Single-Column String + * and only big table columns appear in the join result so a hash multi-set is used. 
+ */ +public class VectorMapJoinInnerBigOnlyStringOperator extends VectorMapJoinInnerBigOnlyGenerateResultOperator { + + private static final long serialVersionUID = 1L; + private static final Log LOG = LogFactory.getLog(VectorMapJoinInnerBigOnlyStringOperator.class.getName()); + private static final String CLASS_NAME = VectorMapJoinInnerBigOnlyStringOperator.class.getName(); + + // (none) + + // The above members are initialized by the constructor and must not be + // transient. + //--------------------------------------------------------------------------- + + // The hash map for this specialized class. + private transient VectorMapJoinBytesHashMultiSet hashMultiSet; + + //--------------------------------------------------------------------------- + // Single-Column String specific members. + // + + // The column number for this one column join specialization. + private transient int singleJoinColumn; + + //--------------------------------------------------------------------------- + // Pass-thru constructors. + // + + public VectorMapJoinInnerBigOnlyStringOperator() { + super(); + } + + public VectorMapJoinInnerBigOnlyStringOperator(VectorizationContext vContext, OperatorDesc conf) throws HiveException { + super(vContext, conf); + } + + //--------------------------------------------------------------------------- + // Process Single-Column String Inner Big-Only Join on a vectorized row batch. + // + + @Override + public void process(Object row, int tag) throws HiveException { + + try { + VectorizedRowBatch batch = (VectorizedRowBatch) row; + + alias = (byte) tag; + + if (needCommonSetup) { + // Our one time process method initialization. + commonSetup(batch); + + /* + * Initialize Single-Column String members for this specialized class. + */ + + singleJoinColumn = bigTableKeyColumnMap[0]; + + needCommonSetup = false; + } + + if (needHashTableSetup) { + // Setup our hash table specialization. 
It will be the first time the process + // method is called, or after a Hybrid Grace reload. + + /* + * Get our Single-Column String hash multi-set information for this specialized class. + */ + + hashMultiSet = (VectorMapJoinBytesHashMultiSet) vectorMapJoinHashTable; + + needHashTableSetup = false; + } + + batchCounter++; + + // For inner joins, we may apply the filter(s) now. + for(VectorExpression ve : bigTableFilterExpressions) { + ve.evaluate(batch); + } + + final int inputLogicalSize = batch.size; + + if (inputLogicalSize == 0) { + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty"); + } + return; + } + + // Perform any key expressions. Results will go into scratch columns. + if (bigTableKeyExpressions != null) { + for (VectorExpression ve : bigTableKeyExpressions) { + ve.evaluate(batch); + } + } + + // We rebuild in-place the selected array with rows destine to be forwarded. + int numSel = 0; + + /* + * Single-Column String specific declarations. + */ + + // The one join column for this specialized class. + BytesColumnVector joinColVector = (BytesColumnVector) batch.cols[singleJoinColumn]; + byte[][] vector = joinColVector.vector; + int[] start = joinColVector.start; + int[] length = joinColVector.length; + + /* + * Single-Column String check for repeating. + */ + + // Check single column for repeating. + boolean allKeyInputColumnsRepeating = joinColVector.isRepeating; + + if (allKeyInputColumnsRepeating) { + + /* + * Repeating. + */ + + // All key input columns are repeating. Generate key once. Lookup once. + // Since the key is repeated, we must use entry 0 regardless of selectedInUse. + + /* + * Single-Column String specific repeated lookup. + */ + + byte[] keyBytes = vector[0]; + int keyStart = start[0]; + int keyLength = length[0]; + JoinUtil.JoinResult joinResult = hashMultiSet.contains(keyBytes, keyStart, keyLength, hashMultiSetResults[0]); + + /* + * Common repeated join result processing. 
+ */ + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); + } + numSel = finishInnerBigOnlyRepeated(batch, joinResult, hashMultiSetResults[0]); + } else { + + /* + * NOT Repeating. + */ + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); + } + + // We remember any matching rows in matchs / matchSize. At the end of the loop, + // selected / batch.size will represent both matching and non-matching rows for outer join. + // Only deferred rows will have been removed from selected. + int selected[] = batch.selected; + boolean selectedInUse = batch.selectedInUse; + + int hashMultiSetResultCount = 0; + int allMatchCount = 0; + int equalKeySeriesCount = 0; + int spillCount = 0; + + /* + * Single-Column String specific variables. + */ + + int saveKeyBatchIndex = -1; + + // We optimize performance by only looking up the first key in a series of equal keys. + boolean haveSaveKey = false; + JoinUtil.JoinResult saveJoinResult = JoinUtil.JoinResult.NOMATCH; + + // Logical loop over the rows in the batch since the batch may have selected in use. + for (int logical = 0; logical < inputLogicalSize; logical++) { + int batchIndex = (selectedInUse ? selected[logical] : logical); + + /* + * Single-Column String get key. + */ + + // Implicit -- use batchIndex. + + /* + * Equal key series checking. + */ + + if (!haveSaveKey || + StringExpr.compare(vector[saveKeyBatchIndex], start[saveKeyBatchIndex], length[saveKeyBatchIndex], + vector[batchIndex], start[batchIndex], length[batchIndex]) != 0) { + + // New key. + + if (haveSaveKey) { + // Move on with our counts. + switch (saveJoinResult) { + case MATCH: + // We have extracted the count from the hash multi-set result, so we don't keep it. + equalKeySeriesCount++; + break; + case SPILL: + // We keep the hash multi-set result for its spill information. 
+ hashMultiSetResultCount++; + break; + case NOMATCH: + break; + } + } + + // Regardless of our matching result, we keep that information to make multiple use + // of it for a possible series of equal keys. + haveSaveKey = true; + + /* + * Single-Column String specific save key. + */ + + saveKeyBatchIndex = batchIndex; + + /* + * Single-Column String specific lookup key. + */ + + byte[] keyBytes = vector[batchIndex]; + int keyStart = start[batchIndex]; + int keyLength = length[batchIndex]; + saveJoinResult = hashMultiSet.contains(keyBytes, keyStart, keyLength, hashMultiSetResults[hashMultiSetResultCount]); + + /* + * Common inner big-only join result processing. + */ + + switch (saveJoinResult) { + case MATCH: + equalKeySeriesValueCounts[equalKeySeriesCount] = hashMultiSetResults[hashMultiSetResultCount].count(); + equalKeySeriesAllMatchIndices[equalKeySeriesCount] = allMatchCount; + equalKeySeriesDuplicateCounts[equalKeySeriesCount] = 1; + allMatchs[allMatchCount++] = batchIndex; + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " MATCH isSingleValue " + equalKeySeriesIsSingleValue[equalKeySeriesCount] + " currentKey " + currentKey); + break; + + case SPILL: + spills[spillCount] = batchIndex; + spillHashMapResultIndices[spillCount] = hashMultiSetResultCount; + spillCount++; + break; + + case NOMATCH: + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH" + " currentKey " + currentKey); + break; + } + } else { + // Series of equal keys. 
+ + switch (saveJoinResult) { + case MATCH: + equalKeySeriesDuplicateCounts[equalKeySeriesCount]++; + allMatchs[allMatchCount++] = batchIndex; + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " MATCH duplicate"); + break; + + case SPILL: + spills[spillCount] = batchIndex; + spillHashMapResultIndices[spillCount] = hashMultiSetResultCount; + spillCount++; + break; + + case NOMATCH: + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH duplicate"); + break; + } + } + } + + if (haveSaveKey) { + // Update our counts for the last key. + switch (saveJoinResult) { + case MATCH: + // We have extracted the count from the hash multi-set result, so we don't keep it. + equalKeySeriesCount++; + break; + case SPILL: + // We keep the hash multi-set result for its spill information. + hashMultiSetResultCount++; + break; + case NOMATCH: + break; + } + } + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + + " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) + + " equalKeySeriesValueCounts " + longArrayToRangesString(equalKeySeriesValueCounts, equalKeySeriesCount) + + " equalKeySeriesAllMatchIndices " + intArrayToRangesString(equalKeySeriesAllMatchIndices, equalKeySeriesCount) + + " equalKeySeriesDuplicateCounts " + intArrayToRangesString(equalKeySeriesDuplicateCounts, equalKeySeriesCount) + + " spills " + intArrayToRangesString(spills, spillCount) + + " spillHashMapResultIndices " + intArrayToRangesString(spillHashMapResultIndices, spillCount) + + " hashMapResults " + Arrays.toString(Arrays.copyOfRange(hashMultiSetResults, 0, hashMultiSetResultCount))); + } + + numSel = finishInnerBigOnly(batch, + allMatchs, allMatchCount, + equalKeySeriesValueCounts, equalKeySeriesAllMatchIndices, + equalKeySeriesDuplicateCounts, equalKeySeriesCount, + spills, spillHashMapResultIndices, spillCount, + (VectorMapJoinHashTableResult[]) hashMultiSetResults, hashMultiSetResultCount); + } + + batch.selectedInUse = true; + 
batch.size = numSel; + + if (batch.size > 0) { + // Forward any remaining selected rows. + forwardBigTableBatch(batch); + } + + } catch (IOException e) { + throw new HiveException(e); + } catch (Exception e) { + throw new HiveException(e); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerGenerateResultOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerGenerateResultOperator.java new file mode 100644 index 0000000..a7eb454 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerGenerateResultOperator.java @@ -0,0 +1,236 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMap; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTableResult; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMapResult; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; + +/** + * This class has methods for generating vectorized join results for inner joins. + * + * Inner joins use a hash map to lookup the 1 or more small table values. + * + * One vector inner join optimization is projecting inner keys. When a key appears + * in the small table results area, instead of copying or referencing key we just include + * that key again in the output projection. + * + * Another optimization is when an inner join does not have any small table columns in the + * join result, we use a different variation call inner big only. That variation uses + * a hash multi-set instead of hash map since there are no values (just a count). + */ +public abstract class VectorMapJoinInnerGenerateResultOperator + extends VectorMapJoinGenerateResultOperator { + + private static final long serialVersionUID = 1L; + private static final Log LOG = LogFactory.getLog(VectorMapJoinInnerGenerateResultOperator.class.getName()); + + //--------------------------------------------------------------------------- + // Inner join specific members. + // + + // An array of hash map results so we can do lookups on the whole batch before output result + // generation. 
+ protected transient VectorMapJoinHashMapResult hashMapResults[]; + + // Pre-allocated member for storing the (physical) batch index of matching row (single- or + // multi-small-table-valued) indexes during a process call. + protected transient int[] allMatchs; + + /* + * Pre-allocated members for storing information equal key series for small-table matches. + * + * ~HashMapResultIndices + * Index into the hashMapResults array for the match. + * ~AllMatchIndices + * (Logical) indices into allMatchs to the first row of a match of a + * possible series of duplicate keys. + * ~IsSingleValue + * Whether there is 1 or multiple small table values. + * ~DuplicateCounts + * The duplicate count for each matched key. + * + */ + protected transient int[] equalKeySeriesHashMapResultIndices; + protected transient int[] equalKeySeriesAllMatchIndices; + protected transient boolean[] equalKeySeriesIsSingleValue; + protected transient int[] equalKeySeriesDuplicateCounts; + + // Pre-allocated member for storing the (physical) batch index of rows that need to be spilled. + protected transient int[] spills; + + // Pre-allocated member for storing index into the hashMapResults for each spilled row. + protected transient int[] spillHashMapResultIndices; + + public VectorMapJoinInnerGenerateResultOperator() { + super(); + } + + public VectorMapJoinInnerGenerateResultOperator(VectorizationContext vContext, OperatorDesc conf) + throws HiveException { + super(vContext, conf); + } + + /* + * Setup our inner join specific members. + */ + protected void commonSetup(VectorizedRowBatch batch) throws HiveException { + super.commonSetup(batch); + + // Inner join specific. 
+ VectorMapJoinHashMap baseHashMap = (VectorMapJoinHashMap) vectorMapJoinHashTable; + + hashMapResults = new VectorMapJoinHashMapResult[batch.DEFAULT_SIZE]; + for (int i = 0; i < hashMapResults.length; i++) { + hashMapResults[i] = baseHashMap.createHashMapResult(); + } + + allMatchs = new int[batch.DEFAULT_SIZE]; + + equalKeySeriesHashMapResultIndices = new int[batch.DEFAULT_SIZE]; + equalKeySeriesAllMatchIndices = new int[batch.DEFAULT_SIZE]; + equalKeySeriesIsSingleValue = new boolean[batch.DEFAULT_SIZE]; + equalKeySeriesDuplicateCounts = new int[batch.DEFAULT_SIZE]; + + spills = new int[batch.DEFAULT_SIZE]; + spillHashMapResultIndices = new int[batch.DEFAULT_SIZE]; + } + + /* + * Inner join (hash map). + */ + + /** + * Generate the inner join output results for one vectorized row batch. + * + * @param batch + * The big table batch with any matching and any non matching rows both as + * selected in use. + * @param allMatchs + * A subset of the rows of the batch that are matches. + * @param allMatchCount + * Number of matches in allMatchs. + * @param equalKeySeriesHashMapResultIndices + * For each equal key series, the index into the hashMapResult. + * @param equalKeySeriesAllMatchIndices + * For each equal key series, the logical index into allMatchs. + * @param equalKeySeriesIsSingleValue + * For each equal key series, whether there is 1 or multiple small table values. + * @param equalKeySeriesDuplicateCounts + * For each equal key series, the number of duplicates or equal keys. + * @param equalKeySeriesCount + * Number of single value matches. + * @param spills + * A subset of the rows of the batch that are spills. + * @param spillHashMapResultIndices + * For each entry in spills, the index into the hashMapResult. + * @param spillCount + * Number of spills in spills. + * @param hashMapResults + * The array of all hash map results for the batch. + * @param hashMapResultCount + * Number of entries in hashMapResults. 
+ */ + protected int finishInner(VectorizedRowBatch batch, + int[] allMatchs, int allMatchCount, + int[] equalKeySeriesHashMapResultIndices, int[] equalKeySeriesAllMatchIndices, + boolean[] equalKeySeriesIsSingleValue, int[] equalKeySeriesDuplicateCounts, + int equalKeySeriesCount, + int[] spills, int[] spillHashMapResultIndices, int spillCount, + VectorMapJoinHashMapResult[] hashMapResults, int hashMapResultCount) + throws HiveException, IOException { + + int numSel = 0; + + /* + * Optimize by running value expressions only over the matched rows. + */ + if (allMatchCount > 0 && bigTableValueExpressions != null) { + performValueExpressions(batch, allMatchs, allMatchCount); + } + + for (int i = 0; i < equalKeySeriesCount; i++) { + int hashMapResultIndex = equalKeySeriesHashMapResultIndices[i]; + VectorMapJoinHashMapResult hashMapResult = hashMapResults[hashMapResultIndex]; + int allMatchesIndex = equalKeySeriesAllMatchIndices[i]; + boolean isSingleValue = equalKeySeriesIsSingleValue[i]; + int duplicateCount = equalKeySeriesDuplicateCounts[i]; + + if (isSingleValue) { + numSel = generateHashMapResultSingleValue( + batch, hashMapResult, allMatchs, allMatchesIndex, duplicateCount, numSel); + } else { + generateHashMapResultMultiValue( + batch, hashMapResult, allMatchs, allMatchesIndex, duplicateCount); + } + } + + if (spillCount > 0) { + spillHashMapBatch(batch, (VectorMapJoinHashTableResult[]) hashMapResults, + spills, spillHashMapResultIndices, spillCount); + } + + return numSel; + } + + protected int finishInnerRepeated(VectorizedRowBatch batch, JoinUtil.JoinResult joinResult, + VectorMapJoinHashTableResult hashMapResult) throws HiveException, IOException { + + int numSel = 0; + + switch (joinResult) { + case MATCH: + + if (bigTableValueExpressions != null) { + // Run our value expressions over whole batch. + for(VectorExpression ve: bigTableValueExpressions) { + ve.evaluate(batch); + } + } + + // Generate special repeated case. 
+ numSel = generateHashMapResultRepeatedAll(batch, hashMapResults[0]); + break; + + case SPILL: + // Whole batch is spilled. + spillBatchRepeated(batch, (VectorMapJoinHashTableResult) hashMapResults[0]); + break; + + case NOMATCH: + // No match for entire batch. + break; + } + /* + * Common repeated join result processing. + */ + + return numSel; + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java new file mode 100644 index 0000000..c998252 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java @@ -0,0 +1,378 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin; + +import java.io.IOException; +import java.util.Arrays; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; + +// Single-Column Long hash table import. +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinLongHashMap; + +// Single-Column Long specific imports. +import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; + +/* + * Specialized class for doing a vectorized map join that is an inner join on a Single-Column Long + * using a hash map. + */ +public class VectorMapJoinInnerLongOperator extends VectorMapJoinInnerGenerateResultOperator { + + private static final long serialVersionUID = 1L; + private static final Log LOG = LogFactory.getLog(VectorMapJoinInnerLongOperator.class.getName()); + private static final String CLASS_NAME = VectorMapJoinInnerLongOperator.class.getName(); + + // (none) + + // The above members are initialized by the constructor and must not be + // transient. + //--------------------------------------------------------------------------- + + // The hash map for this specialized class. + private transient VectorMapJoinLongHashMap hashMap; + + //--------------------------------------------------------------------------- + // Single-Column Long specific members. + // + + // For integers, we have optional min/max filtering. + private transient boolean useMinMax; + private transient long min; + private transient long max; + + // The column number for this one column join specialization. 
+ private transient int singleJoinColumn; + + //--------------------------------------------------------------------------- + // Pass-thru constructors. + // + + public VectorMapJoinInnerLongOperator() { + super(); + } + + public VectorMapJoinInnerLongOperator(VectorizationContext vContext, OperatorDesc conf) throws HiveException { + super(vContext, conf); + } + + //--------------------------------------------------------------------------- + // Process Single-Column Long Inner Join on a vectorized row batch. + // + + @Override + public void process(Object row, int tag) throws HiveException { + + try { + VectorizedRowBatch batch = (VectorizedRowBatch) row; + + alias = (byte) tag; + + if (needCommonSetup) { + // Our one time process method initialization. + commonSetup(batch); + + /* + * Initialize Single-Column Long members for this specialized class. + */ + + singleJoinColumn = bigTableKeyColumnMap[0]; + + needCommonSetup = false; + } + + if (needHashTableSetup) { + // Setup our hash table specialization. It will be the first time the process + // method is called, or after a Hybrid Grace reload. + + /* + * Get our Single-Column Long hash map information for this specialized class. + */ + + hashMap = (VectorMapJoinLongHashMap) vectorMapJoinHashTable; + useMinMax = hashMap.useMinMax(); + if (useMinMax) { + min = hashMap.min(); + max = hashMap.max(); + } + + needHashTableSetup = false; + } + + batchCounter++; + + // For inner joins, we may apply the filter(s) now. + for(VectorExpression ve : bigTableFilterExpressions) { + ve.evaluate(batch); + } + + final int inputLogicalSize = batch.size; + + if (inputLogicalSize == 0) { + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty"); + } + return; + } + + // Perform any key expressions. Results will go into scratch columns. 
+ if (bigTableKeyExpressions != null) { + for (VectorExpression ve : bigTableKeyExpressions) { + ve.evaluate(batch); + } + } + + // We rebuild in-place the selected array with rows destine to be forwarded. + int numSel = 0; + + /* + * Single-Column Long specific declarations. + */ + + // The one join column for this specialized class. + LongColumnVector joinColVector = (LongColumnVector) batch.cols[singleJoinColumn]; + long[] vector = joinColVector.vector; + + /* + * Single-Column Long check for repeating. + */ + + // Check single column for repeating. + boolean allKeyInputColumnsRepeating = joinColVector.isRepeating; + + if (allKeyInputColumnsRepeating) { + + /* + * Repeating. + */ + + // All key input columns are repeating. Generate key once. Lookup once. + // Since the key is repeated, we must use entry 0 regardless of selectedInUse. + + /* + * Single-Column Long specific repeated lookup. + */ + + long key = vector[0]; + JoinUtil.JoinResult joinResult; + if (useMinMax && (key < min || key > max)) { + // Out of range for whole batch. + joinResult = JoinUtil.JoinResult.NOMATCH; + } else { + joinResult = hashMap.lookup(key, hashMapResults[0]); + } + + /* + * Common repeated join result processing. + */ + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); + } + numSel = finishInnerRepeated(batch, joinResult, hashMapResults[0]); + } else { + + /* + * NOT Repeating. + */ + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); + } + + // We remember any matching rows in matchs / matchSize. At the end of the loop, + // selected / batch.size will represent both matching and non-matching rows for outer join. + // Only deferred rows will have been removed from selected. 
+ int selected[] = batch.selected; + boolean selectedInUse = batch.selectedInUse; + + int hashMapResultCount = 0; + int allMatchCount = 0; + int equalKeySeriesCount = 0; + int spillCount = 0; + + /* + * Single-Column Long specific variables. + */ + + long saveKey = 0; + + // We optimize performance by only looking up the first key in a series of equal keys. + boolean haveSaveKey = false; + JoinUtil.JoinResult saveJoinResult = JoinUtil.JoinResult.NOMATCH; + + // Logical loop over the rows in the batch since the batch may have selected in use. + for (int logical = 0; logical < inputLogicalSize; logical++) { + int batchIndex = (selectedInUse ? selected[logical] : logical); + + /* + * Single-Column Long get key. + */ + + long currentKey = vector[batchIndex]; + + /* + * Equal key series checking. + */ + + if (!haveSaveKey || currentKey != saveKey) { + + // New key. + + if (haveSaveKey) { + // Move on with our counts. + switch (saveJoinResult) { + case MATCH: + hashMapResultCount++; + equalKeySeriesCount++; + break; + case SPILL: + hashMapResultCount++; + break; + case NOMATCH: + break; + } + } + + // Regardless of our matching result, we keep that information to make multiple use + // of it for a possible series of equal keys. + haveSaveKey = true; + + /* + * Single-Column Long specific save key. + */ + + saveKey = currentKey; + + /* + * Single-Column Long specific lookup key. + */ + + if (useMinMax && (currentKey < min || currentKey > max)) { + // Key out of range for whole hash table. + saveJoinResult = JoinUtil.JoinResult.NOMATCH; + } else { + saveJoinResult = hashMap.lookup(currentKey, hashMapResults[hashMapResultCount]); + } + + /* + * Common inner join result processing. 
+ */ + + switch (saveJoinResult) { + case MATCH: + equalKeySeriesHashMapResultIndices[equalKeySeriesCount] = hashMapResultCount; + equalKeySeriesAllMatchIndices[equalKeySeriesCount] = allMatchCount; + equalKeySeriesIsSingleValue[equalKeySeriesCount] = hashMapResults[hashMapResultCount].isSingleRow(); + equalKeySeriesDuplicateCounts[equalKeySeriesCount] = 1; + allMatchs[allMatchCount++] = batchIndex; + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " MATCH isSingleValue " + equalKeySeriesIsSingleValue[equalKeySeriesCount] + " currentKey " + currentKey); + break; + + case SPILL: + spills[spillCount] = batchIndex; + spillHashMapResultIndices[spillCount] = hashMapResultCount; + spillCount++; + break; + + case NOMATCH: + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH" + " currentKey " + currentKey); + break; + } + } else { + // Series of equal keys. + + switch (saveJoinResult) { + case MATCH: + equalKeySeriesDuplicateCounts[equalKeySeriesCount]++; + allMatchs[allMatchCount++] = batchIndex; + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " MATCH duplicate"); + break; + + case SPILL: + spills[spillCount] = batchIndex; + spillHashMapResultIndices[spillCount] = hashMapResultCount; + spillCount++; + break; + + case NOMATCH: + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH duplicate"); + break; + } + } + } + + if (haveSaveKey) { + // Update our counts for the last key. 
+ switch (saveJoinResult) { + case MATCH: + hashMapResultCount++; + equalKeySeriesCount++; + break; + case SPILL: + hashMapResultCount++; + break; + case NOMATCH: + break; + } + } + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + + " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) + + " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) + + " equalKeySeriesAllMatchIndices " + intArrayToRangesString(equalKeySeriesAllMatchIndices, equalKeySeriesCount) + + " equalKeySeriesIsSingleValue " + Arrays.toString(Arrays.copyOfRange(equalKeySeriesIsSingleValue, 0, equalKeySeriesCount)) + + " equalKeySeriesDuplicateCounts " + Arrays.toString(Arrays.copyOfRange(equalKeySeriesDuplicateCounts, 0, equalKeySeriesCount)) + + " spills " + intArrayToRangesString(spills, spillCount) + + " spillHashMapResultIndices " + intArrayToRangesString(spillHashMapResultIndices, spillCount) + + " hashMapResults " + Arrays.toString(Arrays.copyOfRange(hashMapResults, 0, hashMapResultCount))); + } + + numSel = finishInner(batch, + allMatchs, allMatchCount, + equalKeySeriesHashMapResultIndices, equalKeySeriesAllMatchIndices, + equalKeySeriesIsSingleValue, equalKeySeriesDuplicateCounts, + equalKeySeriesCount, + spills, spillHashMapResultIndices, spillCount, + hashMapResults, hashMapResultCount); + } + + batch.selectedInUse = true; + batch.size = numSel; + + if (batch.size > 0) { + // Forward any remaining selected rows. 
+ forwardBigTableBatch(batch); + } + + } catch (IOException e) { + throw new HiveException(e); + } catch (Exception e) { + throw new HiveException(e); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java new file mode 100644 index 0000000..e426476 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java @@ -0,0 +1,390 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin; + +import java.io.IOException; +import java.util.Arrays; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; + +// Multi-Key hash table import. 
+import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashMap; + +// Multi-Key specific imports. +import org.apache.hadoop.hive.ql.exec.vector.VectorSerializeRowNoNulls; +import org.apache.hadoop.hive.serde2.ByteStream.Output; +import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableSerializeWrite; + +/* + * Specialized class for doing a vectorized map join that is an inner join on a Multi-Key + * using a hash map. + */ +public class VectorMapJoinInnerMultiKeyOperator extends VectorMapJoinInnerGenerateResultOperator { + + private static final long serialVersionUID = 1L; + private static final Log LOG = LogFactory.getLog(VectorMapJoinInnerMultiKeyOperator.class.getName()); + private static final String CLASS_NAME = VectorMapJoinInnerMultiKeyOperator.class.getName(); + + // (none) + + // The above members are initialized by the constructor and must not be + // transient. + //--------------------------------------------------------------------------- + + // The hash map for this specialized class. + private transient VectorMapJoinBytesHashMap hashMap; + + //--------------------------------------------------------------------------- + // Multi-Key specific members. + // + + // Object that can take a set of columns in row in a vectorized row batch and serialized it. + // Known to not have any nulls. + private transient VectorSerializeRowNoNulls keyVectorSerializeWriteNoNulls; + + // The BinarySortable serialization of the current key. + private transient Output currentKeyOutput; + + // The BinarySortable serialization of the saved key for a possible series of equal keys. + private transient Output saveKeyOutput; + + //--------------------------------------------------------------------------- + // Pass-thru constructors. 
+ // + + public VectorMapJoinInnerMultiKeyOperator() { + super(); + } + + public VectorMapJoinInnerMultiKeyOperator(VectorizationContext vContext, OperatorDesc conf) throws HiveException { + super(vContext, conf); + } + + //--------------------------------------------------------------------------- + // Process Multi-Key Inner Join on a vectorized row batch. + // + + @Override + public void process(Object row, int tag) throws HiveException { + + try { + VectorizedRowBatch batch = (VectorizedRowBatch) row; + + alias = (byte) tag; + + if (needCommonSetup) { + // Our one time process method initialization. + commonSetup(batch); + + /* + * Initialize Multi-Key members for this specialized class. + */ + + keyVectorSerializeWriteNoNulls = new VectorSerializeRowNoNulls( + new BinarySortableSerializeWrite(bigTableKeyColumnMap.length)); + keyVectorSerializeWriteNoNulls.init(bigTableKeyTypeNames, bigTableKeyColumnMap); + + currentKeyOutput = new Output(); + saveKeyOutput = new Output(); + + needCommonSetup = false; + } + + if (needHashTableSetup) { + // Setup our hash table specialization. It will be the first time the process + // method is called, or after a Hybrid Grace reload. + + /* + * Get our Multi-Key hash map information for this specialized class. + */ + + hashMap = (VectorMapJoinBytesHashMap) vectorMapJoinHashTable; + + needHashTableSetup = false; + } + + batchCounter++; + + // For inner joins, we may apply the filter(s) now. + for(VectorExpression ve : bigTableFilterExpressions) { + ve.evaluate(batch); + } + + final int inputLogicalSize = batch.size; + + if (inputLogicalSize == 0) { + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty"); + } + return; + } + + // Perform any key expressions. Results will go into scratch columns. 
+ if (bigTableKeyExpressions != null) { + for (VectorExpression ve : bigTableKeyExpressions) { + ve.evaluate(batch); + } + } + + // We rebuild in-place the selected array with rows destine to be forwarded. + int numSel = 0; + + /* + * Multi-Key specific declarations. + */ + + // None. + + /* + * Multi-Key check for repeating. + */ + + // If all BigTable input columns to key expressions are isRepeating, then + // calculate key once; lookup once. + boolean allKeyInputColumnsRepeating; + if (bigTableKeyColumnMap.length == 0) { + allKeyInputColumnsRepeating = false; + } else { + allKeyInputColumnsRepeating = true; + for (int i = 0; i < bigTableKeyColumnMap.length; i++) { + if (!batch.cols[bigTableKeyColumnMap[i]].isRepeating) { + allKeyInputColumnsRepeating = false; + break; + } + } + } + + if (allKeyInputColumnsRepeating) { + + /* + * Repeating. + */ + + // All key input columns are repeating. Generate key once. Lookup once. + // Since the key is repeated, we must use entry 0 regardless of selectedInUse. + + /* + * Multi-Key specific repeated lookup. + */ + + keyVectorSerializeWriteNoNulls.setOutput(currentKeyOutput); + keyVectorSerializeWriteNoNulls.serializeWriteNoNulls(batch, 0); + byte[] keyBytes = currentKeyOutput.getData(); + int keyLength = currentKeyOutput.getLength(); + JoinUtil.JoinResult joinResult = hashMap.lookup(keyBytes, 0, keyLength, hashMapResults[0]); + + /* + * Common repeated join result processing. + */ + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); + } + numSel = finishInnerRepeated(batch, joinResult, hashMapResults[0]); + } else { + + /* + * NOT Repeating. + */ + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); + } + + // We remember any matching rows in matchs / matchSize. At the end of the loop, + // selected / batch.size will represent both matching and non-matching rows for outer join. 
+ // Only deferred rows will have been removed from selected. + int selected[] = batch.selected; + boolean selectedInUse = batch.selectedInUse; + + int hashMapResultCount = 0; + int allMatchCount = 0; + int equalKeySeriesCount = 0; + int spillCount = 0; + + /* + * Multi-Key specific variables. + */ + + Output temp; + + // We optimize performance by only looking up the first key in a series of equal keys. + boolean haveSaveKey = false; + JoinUtil.JoinResult saveJoinResult = JoinUtil.JoinResult.NOMATCH; + + // Logical loop over the rows in the batch since the batch may have selected in use. + for (int logical = 0; logical < inputLogicalSize; logical++) { + int batchIndex = (selectedInUse ? selected[logical] : logical); + + /* + * Multi-Key get key. + */ + + // Generate binary sortable key for current row in vectorized row batch. + keyVectorSerializeWriteNoNulls.setOutput(currentKeyOutput); + keyVectorSerializeWriteNoNulls.serializeWriteNoNulls(batch, batchIndex); + + /* + * Equal key series checking. + */ + + if (!haveSaveKey || !saveKeyOutput.arraysEquals(currentKeyOutput)) { + + // New key. + + if (haveSaveKey) { + // Move on with our counts. + switch (saveJoinResult) { + case MATCH: + hashMapResultCount++; + equalKeySeriesCount++; + break; + case SPILL: + hashMapResultCount++; + break; + case NOMATCH: + break; + } + } + + // Regardless of our matching result, we keep that information to make multiple use + // of it for a possible series of equal keys. + haveSaveKey = true; + + /* + * Multi-Key specific save key and lookup. + */ + + temp = saveKeyOutput; + saveKeyOutput = currentKeyOutput; + currentKeyOutput = temp; + + /* + * Multi-Key specific lookup key. + */ + + byte[] keyBytes = saveKeyOutput.getData(); + int keyLength = saveKeyOutput.getLength(); + saveJoinResult = hashMap.lookup(keyBytes, 0, keyLength, hashMapResults[hashMapResultCount]); + + /* + * Common inner join result processing. 
+ */ + + switch (saveJoinResult) { + case MATCH: + equalKeySeriesHashMapResultIndices[equalKeySeriesCount] = hashMapResultCount; + equalKeySeriesAllMatchIndices[equalKeySeriesCount] = allMatchCount; + equalKeySeriesIsSingleValue[equalKeySeriesCount] = hashMapResults[hashMapResultCount].isSingleRow(); + equalKeySeriesDuplicateCounts[equalKeySeriesCount] = 1; + allMatchs[allMatchCount++] = batchIndex; + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " MATCH isSingleValue " + equalKeySeriesIsSingleValue[equalKeySeriesCount] + " currentKey " + currentKey); + break; + + case SPILL: + spills[spillCount] = batchIndex; + spillHashMapResultIndices[spillCount] = hashMapResultCount; + spillCount++; + break; + + case NOMATCH: + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH" + " currentKey " + currentKey); + break; + } + } else { + // Series of equal keys. + + switch (saveJoinResult) { + case MATCH: + equalKeySeriesDuplicateCounts[equalKeySeriesCount]++; + allMatchs[allMatchCount++] = batchIndex; + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " MATCH duplicate"); + break; + + case SPILL: + spills[spillCount] = batchIndex; + spillHashMapResultIndices[spillCount] = hashMapResultCount; + spillCount++; + break; + + case NOMATCH: + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH duplicate"); + break; + } + } + } + + if (haveSaveKey) { + // Update our counts for the last key. 
+ switch (saveJoinResult) { + case MATCH: + hashMapResultCount++; + equalKeySeriesCount++; + break; + case SPILL: + hashMapResultCount++; + break; + case NOMATCH: + break; + } + } + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + + " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) + + " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) + + " equalKeySeriesAllMatchIndices " + intArrayToRangesString(equalKeySeriesAllMatchIndices, equalKeySeriesCount) + + " equalKeySeriesIsSingleValue " + Arrays.toString(Arrays.copyOfRange(equalKeySeriesIsSingleValue, 0, equalKeySeriesCount)) + + " equalKeySeriesDuplicateCounts " + Arrays.toString(Arrays.copyOfRange(equalKeySeriesDuplicateCounts, 0, equalKeySeriesCount)) + + " spills " + intArrayToRangesString(spills, spillCount) + + " spillHashMapResultIndices " + intArrayToRangesString(spillHashMapResultIndices, spillCount) + + " hashMapResults " + Arrays.toString(Arrays.copyOfRange(hashMapResults, 0, hashMapResultCount))); + } + + numSel = finishInner(batch, + allMatchs, allMatchCount, + equalKeySeriesHashMapResultIndices, equalKeySeriesAllMatchIndices, + equalKeySeriesIsSingleValue, equalKeySeriesDuplicateCounts, + equalKeySeriesCount, + spills, spillHashMapResultIndices, spillCount, + hashMapResults, hashMapResultCount); + } + + batch.selectedInUse = true; + batch.size = numSel; + + if (batch.size > 0) { + // Forward any remaining selected rows. 
+ forwardBigTableBatch(batch); + } + + } catch (IOException e) { + throw new HiveException(e); + } catch (Exception e) { + throw new HiveException(e); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java new file mode 100644 index 0000000..3bc225a --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java @@ -0,0 +1,367 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin; + +import java.io.IOException; +import java.util.Arrays; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; + +// Single-Column String hash table import. 
+import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashMap; + +// Single-Column String specific imports. +import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr; + +/* + * Specialized class for doing a vectorized map join that is an inner join on a Single-Column String + * using a hash map. + */ +public class VectorMapJoinInnerStringOperator extends VectorMapJoinInnerGenerateResultOperator { + + private static final long serialVersionUID = 1L; + private static final Log LOG = LogFactory.getLog(VectorMapJoinInnerStringOperator.class.getName()); + private static final String CLASS_NAME = VectorMapJoinInnerStringOperator.class.getName(); + + // (none) + + // The above members are initialized by the constructor and must not be + // transient. + //--------------------------------------------------------------------------- + + // The hash map for this specialized class. + private transient VectorMapJoinBytesHashMap hashMap; + + //--------------------------------------------------------------------------- + // Single-Column String specific members. + // + + // The column number for this one column join specialization. + private transient int singleJoinColumn; + + //--------------------------------------------------------------------------- + // Pass-thru constructors. + // + + public VectorMapJoinInnerStringOperator() { + super(); + } + + public VectorMapJoinInnerStringOperator(VectorizationContext vContext, OperatorDesc conf) throws HiveException { + super(vContext, conf); + } + + //--------------------------------------------------------------------------- + // Process Single-Column String Inner Join on a vectorized row batch. 
+ // + + @Override + public void process(Object row, int tag) throws HiveException { + + try { + VectorizedRowBatch batch = (VectorizedRowBatch) row; + + alias = (byte) tag; + + if (needCommonSetup) { + // Our one time process method initialization. + commonSetup(batch); + + /* + * Initialize Single-Column String members for this specialized class. + */ + + singleJoinColumn = bigTableKeyColumnMap[0]; + + needCommonSetup = false; + } + + if (needHashTableSetup) { + // Setup our hash table specialization. It will be the first time the process + // method is called, or after a Hybrid Grace reload. + + /* + * Get our Single-Column String hash map information for this specialized class. + */ + + hashMap = (VectorMapJoinBytesHashMap) vectorMapJoinHashTable; + + needHashTableSetup = false; + } + + batchCounter++; + + // For inner joins, we may apply the filter(s) now. + for(VectorExpression ve : bigTableFilterExpressions) { + ve.evaluate(batch); + } + + final int inputLogicalSize = batch.size; + + if (inputLogicalSize == 0) { + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty"); + } + return; + } + + // Perform any key expressions. Results will go into scratch columns. + if (bigTableKeyExpressions != null) { + for (VectorExpression ve : bigTableKeyExpressions) { + ve.evaluate(batch); + } + } + + // We rebuild in-place the selected array with rows destine to be forwarded. + int numSel = 0; + + /* + * Single-Column String specific declarations. + */ + + // The one join column for this specialized class. + BytesColumnVector joinColVector = (BytesColumnVector) batch.cols[singleJoinColumn]; + byte[][] vector = joinColVector.vector; + int[] start = joinColVector.start; + int[] length = joinColVector.length; + + /* + * Single-Column String check for repeating. + */ + + // Check single column for repeating. + boolean allKeyInputColumnsRepeating = joinColVector.isRepeating; + + if (allKeyInputColumnsRepeating) { + + /* + * Repeating. 
+ */ + + // All key input columns are repeating. Generate key once. Lookup once. + // Since the key is repeated, we must use entry 0 regardless of selectedInUse. + + /* + * Single-Column String specific repeated lookup. + */ + + byte[] keyBytes = vector[0]; + int keyStart = start[0]; + int keyLength = length[0]; + JoinUtil.JoinResult joinResult = hashMap.lookup(keyBytes, keyStart, keyLength, hashMapResults[0]); + + /* + * Common repeated join result processing. + */ + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); + } + numSel = finishInnerRepeated(batch, joinResult, hashMapResults[0]); + } else { + + /* + * NOT Repeating. + */ + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); + } + + // We remember any matching rows in matchs / matchSize. At the end of the loop, + // selected / batch.size will represent both matching and non-matching rows for outer join. + // Only deferred rows will have been removed from selected. + int selected[] = batch.selected; + boolean selectedInUse = batch.selectedInUse; + + int hashMapResultCount = 0; + int allMatchCount = 0; + int equalKeySeriesCount = 0; + int spillCount = 0; + + /* + * Single-Column String specific variables. + */ + + int saveKeyBatchIndex = -1; + + // We optimize performance by only looking up the first key in a series of equal keys. + boolean haveSaveKey = false; + JoinUtil.JoinResult saveJoinResult = JoinUtil.JoinResult.NOMATCH; + + // Logical loop over the rows in the batch since the batch may have selected in use. + for (int logical = 0; logical < inputLogicalSize; logical++) { + int batchIndex = (selectedInUse ? selected[logical] : logical); + + /* + * Single-Column String get key. + */ + + // Implicit -- use batchIndex. + + /* + * Equal key series checking. 
+ */ + + if (!haveSaveKey || + StringExpr.compare(vector[saveKeyBatchIndex], start[saveKeyBatchIndex], length[saveKeyBatchIndex], + vector[batchIndex], start[batchIndex], length[batchIndex]) != 0) { + + // New key. + + if (haveSaveKey) { + // Move on with our counts. + switch (saveJoinResult) { + case MATCH: + hashMapResultCount++; + equalKeySeriesCount++; + break; + case SPILL: + hashMapResultCount++; + break; + case NOMATCH: + break; + } + } + + // Regardless of our matching result, we keep that information to make multiple use + // of it for a possible series of equal keys. + haveSaveKey = true; + + /* + * Single-Column String specific save key. + */ + + saveKeyBatchIndex = batchIndex; + + /* + * Single-Column String specific lookup key. + */ + + byte[] keyBytes = vector[batchIndex]; + int keyStart = start[batchIndex]; + int keyLength = length[batchIndex]; + saveJoinResult = hashMap.lookup(keyBytes, keyStart, keyLength, hashMapResults[hashMapResultCount]); + + /* + * Common inner join result processing. + */ + + switch (saveJoinResult) { + case MATCH: + equalKeySeriesHashMapResultIndices[equalKeySeriesCount] = hashMapResultCount; + equalKeySeriesAllMatchIndices[equalKeySeriesCount] = allMatchCount; + equalKeySeriesIsSingleValue[equalKeySeriesCount] = hashMapResults[hashMapResultCount].isSingleRow(); + equalKeySeriesDuplicateCounts[equalKeySeriesCount] = 1; + allMatchs[allMatchCount++] = batchIndex; + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " MATCH isSingleValue " + equalKeySeriesIsSingleValue[equalKeySeriesCount] + " currentKey " + currentKey); + break; + + case SPILL: + spills[spillCount] = batchIndex; + spillHashMapResultIndices[spillCount] = hashMapResultCount; + spillCount++; + break; + + case NOMATCH: + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH" + " currentKey " + currentKey); + break; + } + } else { + // Series of equal keys. 
+ + switch (saveJoinResult) { + case MATCH: + equalKeySeriesDuplicateCounts[equalKeySeriesCount]++; + allMatchs[allMatchCount++] = batchIndex; + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " MATCH duplicate"); + break; + + case SPILL: + spills[spillCount] = batchIndex; + spillHashMapResultIndices[spillCount] = hashMapResultCount; + spillCount++; + break; + + case NOMATCH: + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH duplicate"); + break; + } + } + } + + if (haveSaveKey) { + // Update our counts for the last key. + switch (saveJoinResult) { + case MATCH: + hashMapResultCount++; + equalKeySeriesCount++; + break; + case SPILL: + hashMapResultCount++; + break; + case NOMATCH: + break; + } + } + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + + " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) + + " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) + + " equalKeySeriesAllMatchIndices " + intArrayToRangesString(equalKeySeriesAllMatchIndices, equalKeySeriesCount) + + " equalKeySeriesIsSingleValue " + Arrays.toString(Arrays.copyOfRange(equalKeySeriesIsSingleValue, 0, equalKeySeriesCount)) + + " equalKeySeriesDuplicateCounts " + Arrays.toString(Arrays.copyOfRange(equalKeySeriesDuplicateCounts, 0, equalKeySeriesCount)) + + " spills " + intArrayToRangesString(spills, spillCount) + + " spillHashMapResultIndices " + intArrayToRangesString(spillHashMapResultIndices, spillCount) + + " hashMapResults " + Arrays.toString(Arrays.copyOfRange(hashMapResults, 0, hashMapResultCount))); + } + + numSel = finishInner(batch, + allMatchs, allMatchCount, + equalKeySeriesHashMapResultIndices, equalKeySeriesAllMatchIndices, + equalKeySeriesIsSingleValue, equalKeySeriesDuplicateCounts, + equalKeySeriesCount, + spills, spillHashMapResultIndices, spillCount, + hashMapResults, hashMapResultCount); + } + + batch.selectedInUse = true; + 
batch.size = numSel; + + if (batch.size > 0) { + // Forward any remaining selected rows. + forwardBigTableBatch(batch); + } + + } catch (IOException e) { + throw new HiveException(e); + } catch (Exception e) { + throw new HiveException(e); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiGenerateResultOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiGenerateResultOperator.java new file mode 100644 index 0000000..57571e6 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiGenerateResultOperator.java @@ -0,0 +1,255 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashSet; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashSetResult; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTableResult; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; + +/** + * This class has methods for generating vectorized join results for left semi joins. + * + * The big difference between inner joins and left semi joins is existence testing. + * + * Inner joins use a hash map to lookup the 1 or more small table values. + * + * Left semi joins are a specialized join for outputting big table rows whose key exists + * in the small table. + * + * No small table values are needed for left semi join since they would be empty. So, + * we use a hash set as the hash table. Hash sets just report whether a key exists. This + * is a big performance optimization. + */ +public abstract class VectorMapJoinLeftSemiGenerateResultOperator + extends VectorMapJoinGenerateResultOperator { + + private static final long serialVersionUID = 1L; + private static final Log LOG = LogFactory.getLog(VectorMapJoinLeftSemiGenerateResultOperator.class.getName()); + + //--------------------------------------------------------------------------- + // Semi join specific members. + // + + // An array of hash set results so we can do lookups on the whole batch before output result + // generation. 
+ protected transient VectorMapJoinHashSetResult hashSetResults[]; + + // Pre-allocated member for storing the (physical) batch index of matching row (single- or + // multi-small-table-valued) indexes during a process call. + protected transient int[] allMatchs; + + // Pre-allocated member for storing the (physical) batch index of rows that need to be spilled. + protected transient int[] spills; + + // Pre-allocated member for storing index into the hashSetResults for each spilled row. + protected transient int[] spillHashMapResultIndices; + + public VectorMapJoinLeftSemiGenerateResultOperator() { + super(); + } + + public VectorMapJoinLeftSemiGenerateResultOperator(VectorizationContext vContext, OperatorDesc conf) + throws HiveException { + super(vContext, conf); + } + + /* + * Setup our left semi join specific members. + */ + protected void commonSetup(VectorizedRowBatch batch) throws HiveException { + super.commonSetup(batch); + + // Semi join specific. + VectorMapJoinHashSet baseHashSet = (VectorMapJoinHashSet) vectorMapJoinHashTable; + + hashSetResults = new VectorMapJoinHashSetResult[batch.DEFAULT_SIZE]; + for (int i = 0; i < hashSetResults.length; i++) { + hashSetResults[i] = baseHashSet.createHashSetResult(); + } + + allMatchs = new int[batch.DEFAULT_SIZE]; + + spills = new int[batch.DEFAULT_SIZE]; + spillHashMapResultIndices = new int[batch.DEFAULT_SIZE]; + } + + //----------------------------------------------------------------------------------------------- + + /* + * Left semi join (hash set). + */ + + /** + * Generate the left semi join output results for one vectorized row batch. + * + * @param batch + * The big table batch with any matching and any non matching rows both as + * selected in use. + * @param allMatchs + * A subset of the rows of the batch that are matches. + * @param allMatchCount + * Number of matches in allMatchs. + * @param spills + * A subset of the rows of the batch that are spills. 
+ * @param spillHashMapResultIndices + * For each entry in spills, the index into the hashTableResults. + * @param spillCount + * Number of spills in spills. + * @param hashTableResults + * The array of all hash table results for the batch. We need the + * VectorMapJoinHashTableResult for the spill information. + */ + protected int finishLeftSemi(VectorizedRowBatch batch, + int[] allMatchs, int allMatchCount, + int[] spills, int[] spillHashMapResultIndices, int spillCount, + VectorMapJoinHashTableResult[] hashTableResults) throws HiveException, IOException { + + int numSel; + + /* + * Optimize by running value expressions only over the matched rows. + */ + if (allMatchCount > 0 && bigTableValueExpressions != null) { + performValueExpressions(batch, allMatchs, allMatchCount); + } + + numSel = generateHashSetResults(batch, allMatchs, allMatchCount); + + if (spillCount > 0) { + spillHashMapBatch(batch, hashTableResults, + spills, spillHashMapResultIndices, spillCount); + } + + return numSel; + } + + /** + * Generate the matching left semi join output results of a vectorized row batch. + * + * @param batch + * The big table batch. + * @param allMatchs + * A subset of the rows of the batch that are matches. + * @param allMatchCount + * Number of matches in allMatchs. + */ + private int generateHashSetResults(VectorizedRowBatch batch, + int[] allMatchs, int allMatchCount) + throws HiveException, IOException { + + int numSel = 0; + + if (!onlyUseOverflowBatch) { + + // Generate result within big table batch itself. + + for (int i = 0; i < allMatchCount; i++) { + + int batchIndex = allMatchs[i]; + + // Use the big table row as output. + batch.selected[numSel++] = batchIndex; + } + } else { + + // Generate result in overflow batch. + + for (int i = 0; i < allMatchCount; i++) { + + int batchIndex = allMatchs[i]; + + // Copy the BigTable values into the overflow batch. Since the overflow batch may + // not get flushed here, we must copy by value. 
+ if (bigTableRetainedVectorCopy != null) { + bigTableRetainedVectorCopy.copyByValue(batch, batchIndex, + overflowBatch, overflowBatch.size); + } + + overflowBatch.size++; + if (overflowBatch.size == overflowBatch.DEFAULT_SIZE) { + forwardOverflow(); + } + } + } + return numSel; + } + + /** + * Generate the left semi join output results for one vectorized row batch with a repeated key. + * + * @param batch + * The big table batch whose repeated key matches. + */ + protected int generateHashSetResultRepeatedAll(VectorizedRowBatch batch) throws HiveException { + + if (batch.selectedInUse) { + // The selected array is already filled in as we want it. + } else { + int[] selected = batch.selected; + for (int i = 0; i < batch.size; i++) { + selected[i] = i; + } + batch.selectedInUse = true; + } + + return batch.size; + } + + protected int finishLeftSemiRepeated(VectorizedRowBatch batch, JoinUtil.JoinResult joinResult, + VectorMapJoinHashTableResult hashSetResult) throws HiveException, IOException { + + int numSel = 0; + + switch (joinResult) { + case MATCH: + + if (bigTableValueExpressions != null) { + // Run our value expressions over whole batch. + for(VectorExpression ve: bigTableValueExpressions) { + ve.evaluate(batch); + } + } + + // Generate special repeated case. + numSel = generateHashSetResultRepeatedAll(batch); + break; + + case SPILL: + // Whole batch is spilled. + spillBatchRepeated(batch, (VectorMapJoinHashTableResult) hashSetResult); + break; + + case NOMATCH: + // No match for entire batch. 
+ break; + } + + return numSel; + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java new file mode 100644 index 0000000..dd614da --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java @@ -0,0 +1,366 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin; + +import java.io.IOException; +import java.util.Arrays; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTableResult; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; + +// Single-Column Long hash table import. 
+import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinLongHashSet;
+
+// Single-Column Long specific imports.
+import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+
+/*
+ * Specialized class for doing a vectorized map join that is a left semi join on a Single-Column Long
+ * using a hash set.
+ */
+public class VectorMapJoinLeftSemiLongOperator extends VectorMapJoinLeftSemiGenerateResultOperator {
+
+  private static final long serialVersionUID = 1L;
+
+  // Log with this operator's own class name (not a copy-paste of another operator's),
+  // so log lines are attributed to the correct operator.
+  private static final Log LOG = LogFactory.getLog(VectorMapJoinLeftSemiLongOperator.class.getName());
+  private static final String CLASS_NAME = VectorMapJoinLeftSemiLongOperator.class.getName();
+
+  // (none)
+
+  // The above members are initialized by the constructor and must not be
+  // transient.
+  //---------------------------------------------------------------------------
+
+  // The hash map for this specialized class.
+  private transient VectorMapJoinLongHashSet hashSet;
+
+  //---------------------------------------------------------------------------
+  // Single-Column Long specific members.
+  //
+
+  // For integers, we have optional min/max filtering.
+  private transient boolean useMinMax;
+  private transient long min;
+  private transient long max;
+
+  // The column number for this one column join specialization.
+  private transient int singleJoinColumn;
+
+  //---------------------------------------------------------------------------
+  // Pass-thru constructors.
+  //
+
+  public VectorMapJoinLeftSemiLongOperator() {
+    super();
+  }
+
+  public VectorMapJoinLeftSemiLongOperator(VectorizationContext vContext, OperatorDesc conf) throws HiveException {
+    super(vContext, conf);
+  }
+
+  //---------------------------------------------------------------------------
+  // Process Single-Column Long Left-Semi Join on a vectorized row batch.
+ // + + @Override + public void process(Object row, int tag) throws HiveException { + + try { + VectorizedRowBatch batch = (VectorizedRowBatch) row; + + alias = (byte) tag; + + if (needCommonSetup) { + // Our one time process method initialization. + commonSetup(batch); + + /* + * Initialize Single-Column Long members for this specialized class. + */ + + singleJoinColumn = bigTableKeyColumnMap[0]; + + needCommonSetup = false; + } + + if (needHashTableSetup) { + // Setup our hash table specialization. It will be the first time the process + // method is called, or after a Hybrid Grace reload. + + /* + * Get our Single-Column Long hash set information for this specialized class. + */ + + hashSet = (VectorMapJoinLongHashSet) vectorMapJoinHashTable; + useMinMax = hashSet.useMinMax(); + if (useMinMax) { + min = hashSet.min(); + max = hashSet.max(); + } + + needHashTableSetup = false; + } + + batchCounter++; + + // For left semi joins, we may apply the filter(s) now. + for(VectorExpression ve : bigTableFilterExpressions) { + ve.evaluate(batch); + } + + final int inputLogicalSize = batch.size; + + if (inputLogicalSize == 0) { + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty"); + } + return; + } + + // Perform any key expressions. Results will go into scratch columns. + if (bigTableKeyExpressions != null) { + for (VectorExpression ve : bigTableKeyExpressions) { + ve.evaluate(batch); + } + } + + // We rebuild in-place the selected array with rows destine to be forwarded. + int numSel = 0; + + /* + * Single-Column Long specific declarations. + */ + + // The one join column for this specialized class. + LongColumnVector joinColVector = (LongColumnVector) batch.cols[singleJoinColumn]; + long[] vector = joinColVector.vector; + + /* + * Single-Column Long check for repeating. + */ + + // Check single column for repeating. 
+ boolean allKeyInputColumnsRepeating = joinColVector.isRepeating; + + if (allKeyInputColumnsRepeating) { + + /* + * Repeating. + */ + + // All key input columns are repeating. Generate key once. Lookup once. + // Since the key is repeated, we must use entry 0 regardless of selectedInUse. + + /* + * Single-Column Long specific repeated lookup. + */ + + long key = vector[0]; + JoinUtil.JoinResult joinResult; + if (useMinMax && (key < min || key > max)) { + // Out of range for whole batch. + joinResult = JoinUtil.JoinResult.NOMATCH; + } else { + joinResult = hashSet.contains(key, hashSetResults[0]); + } + + /* + * Common repeated join result processing. + */ + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); + } + numSel = finishLeftSemiRepeated(batch, joinResult, hashSetResults[0]); + } else { + + /* + * NOT Repeating. + */ + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); + } + + // We remember any matching rows in matchs / matchSize. At the end of the loop, + // selected / batch.size will represent both matching and non-matching rows for outer join. + // Only deferred rows will have been removed from selected. + int selected[] = batch.selected; + boolean selectedInUse = batch.selectedInUse; + + int hashSetResultCount = 0; + int allMatchCount = 0; + int spillCount = 0; + + /* + * Single-Column Long specific variables. + */ + + long saveKey = 0; + + // We optimize performance by only looking up the first key in a series of equal keys. + boolean haveSaveKey = false; + JoinUtil.JoinResult saveJoinResult = JoinUtil.JoinResult.NOMATCH; + + // Logical loop over the rows in the batch since the batch may have selected in use. + for (int logical = 0; logical < inputLogicalSize; logical++) { + int batchIndex = (selectedInUse ? selected[logical] : logical); + + /* + * Single-Column Long get key. 
+ */ + + long currentKey = vector[batchIndex]; + + /* + * Equal key series checking. + */ + + if (!haveSaveKey || currentKey != saveKey) { + + // New key. + + if (haveSaveKey) { + // Move on with our counts. + switch (saveJoinResult) { + case MATCH: + // We have extracted the existence from the hash set result, so we don't keep it. + break; + case SPILL: + // We keep the hash set result for its spill information. + hashSetResultCount++; + break; + case NOMATCH: + break; + } + } + + // Regardless of our matching result, we keep that information to make multiple use + // of it for a possible series of equal keys. + haveSaveKey = true; + + /* + * Single-Column Long specific save key. + */ + + saveKey = currentKey; + + /* + * Single-Column Long specific lookup key. + */ + + if (useMinMax && (currentKey < min || currentKey > max)) { + // Key out of range for whole hash table. + saveJoinResult = JoinUtil.JoinResult.NOMATCH; + } else { + saveJoinResult = hashSet.contains(currentKey, hashSetResults[hashSetResultCount]); + } + + /* + * Common left-semi join result processing. + */ + + switch (saveJoinResult) { + case MATCH: + allMatchs[allMatchCount++] = batchIndex; + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " MATCH isSingleValue " + equalKeySeriesIsSingleValue[equalKeySeriesCount] + " currentKey " + currentKey); + break; + + case SPILL: + spills[spillCount] = batchIndex; + spillHashMapResultIndices[spillCount] = hashSetResultCount; + spillCount++; + break; + + case NOMATCH: + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH" + " currentKey " + currentKey); + break; + } + } else { + // Series of equal keys. 
+ + switch (saveJoinResult) { + case MATCH: + allMatchs[allMatchCount++] = batchIndex; + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " MATCH duplicate"); + break; + + case SPILL: + spills[spillCount] = batchIndex; + spillHashMapResultIndices[spillCount] = hashSetResultCount; + spillCount++; + break; + + case NOMATCH: + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH duplicate"); + break; + } + } + } + + if (haveSaveKey) { + // Update our counts for the last key. + switch (saveJoinResult) { + case MATCH: + // We have extracted the existence from the hash set result, so we don't keep it. + break; + case SPILL: + // We keep the hash set result for its spill information. + hashSetResultCount++; + break; + case NOMATCH: + break; + } + } + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + + " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) + + " spills " + intArrayToRangesString(spills, spillCount) + + " spillHashMapResultIndices " + intArrayToRangesString(spillHashMapResultIndices, spillCount) + + " hashMapResults " + Arrays.toString(Arrays.copyOfRange(hashSetResults, 0, hashSetResultCount))); + } + + numSel = finishLeftSemi(batch, + allMatchs, allMatchCount, + spills, spillHashMapResultIndices, spillCount, + (VectorMapJoinHashTableResult[]) hashSetResults); + } + + batch.selectedInUse = true; + batch.size = numSel; + + if (batch.size > 0) { + // Forward any remaining selected rows. 
+ forwardBigTableBatch(batch); + } + + } catch (IOException e) { + throw new HiveException(e); + } catch (Exception e) { + throw new HiveException(e); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java new file mode 100644 index 0000000..cf4f312 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java @@ -0,0 +1,378 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.hive.ql.exec.vector.mapjoin;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.ql.exec.JoinUtil;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
+import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTableResult;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.OperatorDesc;
+
+// Multi-Key hash table import.
+import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashSet;
+
+// Multi-Key specific imports.
+import org.apache.hadoop.hive.ql.exec.vector.VectorSerializeRowNoNulls;
+import org.apache.hadoop.hive.serde2.ByteStream.Output;
+import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableSerializeWrite;
+
+/*
+ * Specialized class for doing a vectorized map join that is a left semi join on Multi-Key
+ * using hash set.
+ */
+public class VectorMapJoinLeftSemiMultiKeyOperator extends VectorMapJoinLeftSemiGenerateResultOperator {
+
+  private static final long serialVersionUID = 1L;
+
+  // Log with this operator's own class name (not a copy-paste of another operator's),
+  // so log lines are attributed to the correct operator.
+  private static final Log LOG = LogFactory.getLog(VectorMapJoinLeftSemiMultiKeyOperator.class.getName());
+  private static final String CLASS_NAME = VectorMapJoinLeftSemiMultiKeyOperator.class.getName();
+
+  // (none)
+
+  // The above members are initialized by the constructor and must not be
+  // transient.
+  //---------------------------------------------------------------------------
+
+  // The hash map for this specialized class.
+  private transient VectorMapJoinBytesHashSet hashSet;
+
+  //---------------------------------------------------------------------------
+  // Multi-Key specific members.
+ // + + // Object that can take a set of columns in row in a vectorized row batch and serialized it. + // Known to not have any nulls. + private transient VectorSerializeRowNoNulls keyVectorSerializeWriteNoNulls; + + // The BinarySortable serialization of the current key. + private transient Output currentKeyOutput; + + // The BinarySortable serialization of the saved key for a possible series of equal keys. + private transient Output saveKeyOutput; + + //--------------------------------------------------------------------------- + // Pass-thru constructors. + // + + public VectorMapJoinLeftSemiMultiKeyOperator() { + super(); + } + + public VectorMapJoinLeftSemiMultiKeyOperator(VectorizationContext vContext, OperatorDesc conf) throws HiveException { + super(vContext, conf); + } + + //--------------------------------------------------------------------------- + // Process Multi-Key Left-Semi Join on a vectorized row batch. + // + + @Override + public void process(Object row, int tag) throws HiveException { + + try { + VectorizedRowBatch batch = (VectorizedRowBatch) row; + + alias = (byte) tag; + + if (needCommonSetup) { + // Our one time process method initialization. + commonSetup(batch); + + /* + * Initialize Multi-Key members for this specialized class. + */ + + keyVectorSerializeWriteNoNulls = new VectorSerializeRowNoNulls( + new BinarySortableSerializeWrite(bigTableKeyColumnMap.length)); + keyVectorSerializeWriteNoNulls.init(bigTableKeyTypeNames, bigTableKeyColumnMap); + + currentKeyOutput = new Output(); + saveKeyOutput = new Output(); + + needCommonSetup = false; + } + + if (needHashTableSetup) { + // Setup our hash table specialization. It will be the first time the process + // method is called, or after a Hybrid Grace reload. + + /* + * Get our Multi-Key hash set information for this specialized class. 
+ */ + + hashSet = (VectorMapJoinBytesHashSet) vectorMapJoinHashTable; + + needHashTableSetup = false; + } + + batchCounter++; + + // For left semi joins, we may apply the filter(s) now. + for(VectorExpression ve : bigTableFilterExpressions) { + ve.evaluate(batch); + } + + final int inputLogicalSize = batch.size; + + if (inputLogicalSize == 0) { + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty"); + } + return; + } + + // Perform any key expressions. Results will go into scratch columns. + if (bigTableKeyExpressions != null) { + for (VectorExpression ve : bigTableKeyExpressions) { + ve.evaluate(batch); + } + } + + // We rebuild in-place the selected array with rows destine to be forwarded. + int numSel = 0; + + /* + * Multi-Key specific declarations. + */ + + // None. + + /* + * Multi-Key Long check for repeating. + */ + + // If all BigTable input columns to key expressions are isRepeating, then + // calculate key once; lookup once. + boolean allKeyInputColumnsRepeating; + if (bigTableKeyColumnMap.length == 0) { + allKeyInputColumnsRepeating = false; + } else { + allKeyInputColumnsRepeating = true; + for (int i = 0; i < bigTableKeyColumnMap.length; i++) { + if (!batch.cols[bigTableKeyColumnMap[i]].isRepeating) { + allKeyInputColumnsRepeating = false; + break; + } + } + } + + if (allKeyInputColumnsRepeating) { + + /* + * Repeating. + */ + + // All key input columns are repeating. Generate key once. Lookup once. + // Since the key is repeated, we must use entry 0 regardless of selectedInUse. + + /* + * Multi-Key specific repeated lookup. 
+ */ + + keyVectorSerializeWriteNoNulls.setOutput(currentKeyOutput); + keyVectorSerializeWriteNoNulls.serializeWriteNoNulls(batch, 0); + byte[] keyBytes = currentKeyOutput.getData(); + int keyLength = currentKeyOutput.getLength(); + // LOG.debug(CLASS_NAME + " processOp all " + displayBytes(keyBytes, 0, keyLength)); + JoinUtil.JoinResult joinResult = hashSet.contains(keyBytes, 0, keyLength, hashSetResults[0]); + + /* + * Common repeated join result processing. + */ + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); + } + numSel = finishLeftSemiRepeated(batch, joinResult, hashSetResults[0]); + } else { + + /* + * NOT Repeating. + */ + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); + } + + // We remember any matching rows in matchs / matchSize. At the end of the loop, + // selected / batch.size will represent both matching and non-matching rows for outer join. + // Only deferred rows will have been removed from selected. + int selected[] = batch.selected; + boolean selectedInUse = batch.selectedInUse; + + int hashSetResultCount = 0; + int allMatchCount = 0; + int spillCount = 0; + + /* + * Multi-Key specific variables. + */ + + Output temp; + + // We optimize performance by only looking up the first key in a series of equal keys. + boolean haveSaveKey = false; + JoinUtil.JoinResult saveJoinResult = JoinUtil.JoinResult.NOMATCH; + + // Logical loop over the rows in the batch since the batch may have selected in use. + for (int logical = 0; logical < inputLogicalSize; logical++) { + int batchIndex = (selectedInUse ? selected[logical] : logical); + + /* + * Multi-Key get key. + */ + + // Generate binary sortable key for current row in vectorized row batch. 
+ keyVectorSerializeWriteNoNulls.setOutput(currentKeyOutput); + keyVectorSerializeWriteNoNulls.serializeWriteNoNulls(batch, batchIndex); + + // LOG.debug(CLASS_NAME + " currentKey " + + // VectorizedBatchUtil.displayBytes(currentKeyOutput.getData(), 0, currentKeyOutput.getLength())); + + /* + * Equal key series checking. + */ + + if (!haveSaveKey || !saveKeyOutput.arraysEquals(currentKeyOutput)) { + + // New key. + + if (haveSaveKey) { + // Move on with our counts. + switch (saveJoinResult) { + case MATCH: + // We have extracted the existence from the hash set result, so we don't keep it. + break; + case SPILL: + // We keep the hash set result for its spill information. + hashSetResultCount++; + break; + case NOMATCH: + break; + } + } + + // Regardless of our matching result, we keep that information to make multiple use + // of it for a possible series of equal keys. + haveSaveKey = true; + + /* + * Multi-Key specific save key and lookup. + */ + + temp = saveKeyOutput; + saveKeyOutput = currentKeyOutput; + currentKeyOutput = temp; + + byte[] keyBytes = saveKeyOutput.getData(); + int keyLength = saveKeyOutput.getLength(); + saveJoinResult = hashSet.contains(keyBytes, 0, keyLength, hashSetResults[hashSetResultCount]); + + /* + * Common left-semi join result processing. + */ + + switch (saveJoinResult) { + case MATCH: + allMatchs[allMatchCount++] = batchIndex; + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " MATCH isSingleValue " + equalKeySeriesIsSingleValue[equalKeySeriesCount] + " currentKey " + currentKey); + break; + + case SPILL: + spills[spillCount] = batchIndex; + spillHashMapResultIndices[spillCount] = hashSetResultCount; + spillCount++; + break; + + case NOMATCH: + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH" + " currentKey " + currentKey); + break; + } + } else { + // Series of equal keys. 
+ + switch (saveJoinResult) { + case MATCH: + allMatchs[allMatchCount++] = batchIndex; + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " MATCH duplicate"); + break; + + case SPILL: + spills[spillCount] = batchIndex; + spillHashMapResultIndices[spillCount] = hashSetResultCount; + spillCount++; + break; + + case NOMATCH: + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH duplicate"); + break; + } + } + } + + if (haveSaveKey) { + // Update our counts for the last key. + switch (saveJoinResult) { + case MATCH: + // We have extracted the existence from the hash set result, so we don't keep it. + break; + case SPILL: + // We keep the hash set result for its spill information. + hashSetResultCount++; + break; + case NOMATCH: + break; + } + } + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + + " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) + + " spills " + intArrayToRangesString(spills, spillCount) + + " spillHashMapResultIndices " + intArrayToRangesString(spillHashMapResultIndices, spillCount) + + " hashMapResults " + Arrays.toString(Arrays.copyOfRange(hashSetResults, 0, hashSetResultCount))); + } + + numSel = finishLeftSemi(batch, + allMatchs, allMatchCount, + spills, spillHashMapResultIndices, spillCount, + (VectorMapJoinHashTableResult[]) hashSetResults); + } + + batch.selectedInUse = true; + batch.size = numSel; + + if (batch.size > 0) { + // Forward any remaining selected rows. 
+ forwardBigTableBatch(batch); + } + + } catch (IOException e) { + throw new HiveException(e); + } catch (Exception e) { + throw new HiveException(e); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java new file mode 100644 index 0000000..12d663c --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java @@ -0,0 +1,351 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.hive.ql.exec.vector.mapjoin;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.ql.exec.JoinUtil;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
+import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTableResult;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.OperatorDesc;
+
+// Single-Column String hash table import.
+import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashSet;
+
+// Single-Column String specific imports.
+import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
+
+/*
+ * Specialized class for doing a vectorized map join that is a left semi join on a Single-Column String
+ * using a hash set.
+ */
+public class VectorMapJoinLeftSemiStringOperator extends VectorMapJoinLeftSemiGenerateResultOperator {
+
+  private static final long serialVersionUID = 1L;
+
+  // Log with this operator's own class name (not a copy-paste of another operator's),
+  // so log lines are attributed to the correct operator.
+  private static final Log LOG = LogFactory.getLog(VectorMapJoinLeftSemiStringOperator.class.getName());
+  private static final String CLASS_NAME = VectorMapJoinLeftSemiStringOperator.class.getName();
+
+  // (none)
+
+  // The above members are initialized by the constructor and must not be
+  // transient.
+  //---------------------------------------------------------------------------
+
+  // The hash map for this specialized class.
+  private transient VectorMapJoinBytesHashSet hashSet;
+
+  //---------------------------------------------------------------------------
+  // Single-Column String specific members.
+  //
+
+  // The column number for this one column join specialization.
+  private transient int singleJoinColumn;
+
+  //---------------------------------------------------------------------------
+  // Pass-thru constructors.
+  //
+
+  public VectorMapJoinLeftSemiStringOperator() {
+    super();
+  }
+
+  public VectorMapJoinLeftSemiStringOperator(VectorizationContext vContext, OperatorDesc conf) throws HiveException {
+    super(vContext, conf);
+  }
+
+  //---------------------------------------------------------------------------
+  // Process Single-Column String Left-Semi Join on a vectorized row batch.
+  //
+
+  @Override
+  public void process(Object row, int tag) throws HiveException {
+
+    try {
+      VectorizedRowBatch batch = (VectorizedRowBatch) row;
+
+      alias = (byte) tag;
+
+      if (needCommonSetup) {
+        // Our one time process method initialization.
+        commonSetup(batch);
+
+        /*
+         * Initialize Single-Column String members for this specialized class.
+         */
+
+        singleJoinColumn = bigTableKeyColumnMap[0];
+
+        needCommonSetup = false;
+      }
+
+      if (needHashTableSetup) {
+        // Setup our hash table specialization.  It will be the first time the process
+        // method is called, or after a Hybrid Grace reload.
+
+        /*
+         * Get our Single-Column String hash set information for this specialized class.
+         */
+
+        hashSet = (VectorMapJoinBytesHashSet) vectorMapJoinHashTable;
+
+        needHashTableSetup = false;
+      }
+
+      batchCounter++;
+
+      // For left semi joins, we may apply the filter(s) now.
+      for(VectorExpression ve : bigTableFilterExpressions) {
+        ve.evaluate(batch);
+      }
+
+      final int inputLogicalSize = batch.size;
+
+      if (inputLogicalSize == 0) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
+        }
+        return;
+      }
+
+      // Perform any key expressions.  Results will go into scratch columns.
+      if (bigTableKeyExpressions != null) {
+        for (VectorExpression ve : bigTableKeyExpressions) {
+          ve.evaluate(batch);
+        }
+      }
+
+      // We rebuild in-place the selected array with rows destined to be forwarded.
+      int numSel = 0;
+
+      /*
+       * Single-Column String specific declarations.
+       */
+
+      // The one join column for this specialized class.
+      BytesColumnVector joinColVector = (BytesColumnVector) batch.cols[singleJoinColumn];
+      byte[][] vector = joinColVector.vector;
+      int[] start = joinColVector.start;
+      int[] length = joinColVector.length;
+
+      /*
+       * Single-Column String check for repeating.
+       */
+
+      // Check single column for repeating.
+      boolean allKeyInputColumnsRepeating = joinColVector.isRepeating;
+
+      if (allKeyInputColumnsRepeating) {
+
+        /*
+         * Repeating.
+         */
+
+        // All key input columns are repeating.  Generate key once.  Lookup once.
+        // Since the key is repeated, we must use entry 0 regardless of selectedInUse.
+
+        /*
+         * Single-Column String specific repeated lookup.
+         */
+
+        byte[] keyBytes = vector[0];
+        int keyStart = start[0];
+        int keyLength = length[0];
+        JoinUtil.JoinResult joinResult = hashSet.contains(keyBytes, keyStart, keyLength, hashSetResults[0]);
+
+        /*
+         * Common repeated join result processing.
+         */
+
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
+        }
+        numSel = finishLeftSemiRepeated(batch, joinResult, hashSetResults[0]);
+      } else {
+
+        /*
+         * NOT Repeating.
+         */
+
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
+        }
+
+        // We remember any matching rows in allMatchs / allMatchCount.  Rows whose lookup
+        // must be deferred (hash table spill) are tracked in spills / spillCount and are
+        // removed from the forwarded result.
+        int selected[] = batch.selected;
+        boolean selectedInUse = batch.selectedInUse;
+
+        int hashSetResultCount = 0;
+        int allMatchCount = 0;
+        int spillCount = 0;
+
+        /*
+         * Single-Column String specific variables.
+         */
+
+        int saveKeyBatchIndex = -1;
+
+        // We optimize performance by only looking up the first key in a series of equal keys.
+        boolean haveSaveKey = false;
+        JoinUtil.JoinResult saveJoinResult = JoinUtil.JoinResult.NOMATCH;
+
+        // Logical loop over the rows in the batch since the batch may have selected in use.
+        for (int logical = 0; logical < inputLogicalSize; logical++) {
+          int batchIndex = (selectedInUse ? selected[logical] : logical);
+
+          /*
+           * Single-Column String get key.
+           */
+
+          // Implicit -- use batchIndex.
+
+          /*
+           * Equal key series checking.
+           */
+
+          if (!haveSaveKey ||
+              StringExpr.compare(vector[saveKeyBatchIndex], start[saveKeyBatchIndex], length[saveKeyBatchIndex],
+                  vector[batchIndex], start[batchIndex], length[batchIndex]) != 0) {
+
+            // New key.
+
+            if (haveSaveKey) {
+              // Move on with our counts.
+              switch (saveJoinResult) {
+              case MATCH:
+                // We have extracted the existence from the hash set result, so we don't keep it.
+                break;
+              case SPILL:
+                // We keep the hash set result for its spill information.
+                hashSetResultCount++;
+                break;
+              case NOMATCH:
+                break;
+              }
+            }
+
+            // Regardless of our matching result, we keep that information to make multiple use
+            // of it for a possible series of equal keys.
+            haveSaveKey = true;
+
+            /*
+             * Single-Column String specific save key and lookup.
+             */
+
+            saveKeyBatchIndex = batchIndex;
+
+            // Probe the hash set once for this new key; the result is reused for any
+            // immediately following rows with an equal key.
+            byte[] keyBytes = vector[batchIndex];
+            int keyStart = start[batchIndex];
+            int keyLength = length[batchIndex];
+            saveJoinResult = hashSet.contains(keyBytes, keyStart, keyLength, hashSetResults[hashSetResultCount]);
+
+            /*
+             * Common left-semi join result processing.
+             */
+
+            switch (saveJoinResult) {
+            case MATCH:
+              allMatchs[allMatchCount++] = batchIndex;
+              // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " MATCH isSingleValue " + equalKeySeriesIsSingleValue[equalKeySeriesCount] + " currentKey " + currentKey);
+              break;
+
+            case SPILL:
+              spills[spillCount] = batchIndex;
+              spillHashMapResultIndices[spillCount] = hashSetResultCount;
+              spillCount++;
+              break;
+
+            case NOMATCH:
+              // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH" + " currentKey " + currentKey);
+              break;
+            }
+          } else {
+            // Series of equal keys.
+
+            switch (saveJoinResult) {
+            case MATCH:
+              allMatchs[allMatchCount++] = batchIndex;
+              // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " MATCH duplicate");
+              break;
+
+            case SPILL:
+              spills[spillCount] = batchIndex;
+              spillHashMapResultIndices[spillCount] = hashSetResultCount;
+              spillCount++;
+              break;
+
+            case NOMATCH:
+              // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH duplicate");
+              break;
+            }
+          }
+        }
+
+        if (haveSaveKey) {
+          // Update our counts for the last key.
+          switch (saveJoinResult) {
+          case MATCH:
+            // We have extracted the existence from the hash set result, so we don't keep it.
+            break;
+          case SPILL:
+            // We keep the hash set result for its spill information.
+ hashSetResultCount++; + break; + case NOMATCH: + break; + } + } + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + + " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) + + " spills " + intArrayToRangesString(spills, spillCount) + + " spillHashMapResultIndices " + intArrayToRangesString(spillHashMapResultIndices, spillCount) + + " hashMapResults " + Arrays.toString(Arrays.copyOfRange(hashSetResults, 0, hashSetResultCount))); + } + + numSel = finishLeftSemi(batch, + allMatchs, allMatchCount, + spills, spillHashMapResultIndices, spillCount, + (VectorMapJoinHashTableResult[]) hashSetResults); + } + + batch.selectedInUse = true; + batch.size = numSel; + + if (batch.size > 0) { + // Forward any remaining selected rows. + forwardBigTableBatch(batch); + } + + } catch (IOException e) { + throw new HiveException(e); + } catch (Exception e) { + throw new HiveException(e); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java new file mode 100644 index 0000000..efdf0b2 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java @@ -0,0 +1,690 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin; + +import java.io.IOException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMap; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMapResult; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTableResult; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.serde2.WriteBuffers.ByteSegmentRef; + +/** + * This class has methods for generating vectorized join results for outer joins. + * + * The big difference between inner joins and outer joins is the treatment of null and non-matching + * keys. + * + * Inner joins ignore null keys. Outer joins include big table rows with null keys in the result. + * + * (Left non-full) outer joins include big table rows that do not match the small table. Small + * table columns for non-matches will be NULL. + * + * Another important difference is filtering. For outer joins to include the necessary rows, + * filtering must be done after the hash table lookup. 
That is because filtering does not + * eliminate rows, but changes them from match to non-matching rows. They will still appear in + * the join result. + * + * One vector outer join optimization is referencing bytes outer keys. When a bytes key appears + * in the small table results area, instead of copying the bytes key we reference the big table key. + * Bytes column vectors allow a by reference entry to bytes. It is safe to do a by reference + * since it is within the same row. + * + * Outer join uses a hash map since small table columns can be included in the join result. + */ +public abstract class VectorMapJoinOuterGenerateResultOperator + extends VectorMapJoinGenerateResultOperator { + + private static final long serialVersionUID = 1L; + private static final Log LOG = LogFactory.getLog(VectorMapJoinOuterGenerateResultOperator.class.getName()); + + //--------------------------------------------------------------------------- + // Outer join specific members. + // + + // An array of hash map results so we can do lookups on the whole batch before output result + // generation. + protected transient VectorMapJoinHashMapResult hashMapResults[]; + + // Pre-allocated member for storing any matching row indexes during a processOp call. + protected transient int[] matchs; + + // Pre-allocated member for storing the mapping to the row batchIndex of the first of a series of + // equal keys that was looked up during a processOp call. + protected transient int[] matchHashMapResultIndices; + + // All matching and non-matching big table rows. + protected transient int[] nonSpills; + + // Pre-allocated member for storing the (physical) batch index of rows that need to be spilled. + protected transient int[] spills; + + // Pre-allocated member for storing index into the hashSetResults for each spilled row. + protected transient int[] spillHashMapResultIndices; + + // Pre-allocated member for storing any non-matching row indexes during a processOp call. 
+ protected transient int[] scratch1; + + public VectorMapJoinOuterGenerateResultOperator() { + super(); + } + + public VectorMapJoinOuterGenerateResultOperator(VectorizationContext vContext, OperatorDesc conf) + throws HiveException { + super(vContext, conf); + } + + /* + * Setup our outer join specific members. + */ + protected void commonSetup(VectorizedRowBatch batch) throws HiveException { + super.commonSetup(batch); + + // Outer join specific. + VectorMapJoinHashMap baseHashMap = (VectorMapJoinHashMap) vectorMapJoinHashTable; + + hashMapResults = new VectorMapJoinHashMapResult[batch.DEFAULT_SIZE]; + for (int i = 0; i < hashMapResults.length; i++) { + hashMapResults[i] = baseHashMap.createHashMapResult(); + } + matchs = new int[batch.DEFAULT_SIZE]; + matchHashMapResultIndices = new int[batch.DEFAULT_SIZE]; + nonSpills = new int[batch.DEFAULT_SIZE]; + spills = new int[batch.DEFAULT_SIZE]; + spillHashMapResultIndices = new int[batch.DEFAULT_SIZE]; + scratch1 = new int[batch.DEFAULT_SIZE]; + } + + //----------------------------------------------------------------------------------------------- + + /* + * Outer join (hash map). + */ + + /** + * Generate the outer join output results for one vectorized row batch. + * + * Any filter expressions will apply now since hash map lookup for outer join is complete. + * + * @param batch + * The big table batch with any matching and any non matching rows both as + * selected in use. + * @param matchs + * A subset of the rows of the batch that are matches. + * @param matchHashMapResultIndices + * For each entry in matches, the index into the hashMapResult. + * @param matchSize + * Number of matches in matchs. + * @param nonSpills + * The rows of the batch that are both matches and non-matches. + * @param nonspillCount + * Number of rows in nonSpills. + * @param spills + * A subset of the rows of the batch that are spills. + * @param spillHashMapResultIndices + * For each entry in spills, the index into the hashMapResult. 
+ * @param spillCount + * Number of spills in spills. + * @param hashMapResults + * The array of all hash map results for the batch. + * @param hashMapResultCount + * Number of entries in hashMapResults. + * @param scratch1 + * Pre-allocated storage to internal use. + */ + public int finishOuter(VectorizedRowBatch batch, + int[] matchs, int[] matchHashMapResultIndices, int matchCount, + int[] nonSpills, int nonSpillCount, + int[] spills, int[] spillHashMapResultIndices, int spillCount, + VectorMapJoinHashMapResult[] hashMapResults, int hashMapResultCount, + int[] scratch1) throws IOException, HiveException { + + int numSel = 0; + + // At this point we have determined the matching rows only for the ON equality condition(s). + // Implicitly, non-matching rows are those in the selected array minus matchs. + + // Next, for outer join, apply any ON predicates to filter down the matches. + if (matchCount > 0 && bigTableFilterExpressions.length > 0) { + + System.arraycopy(matchs, 0, batch.selected, 0, matchCount); + batch.size = matchCount; + + // Non matches will be removed from the selected array. + for (VectorExpression ve : bigTableFilterExpressions) { + ve.evaluate(batch); + } + + // LOG.info("finishOuter" + + // " filtered batch.selected " + Arrays.toString(Arrays.copyOfRange(batch.selected, 0, batch.size))); + + // Fixup the matchHashMapResultIndices array. 
+ if (batch.size < matchCount) { + int numMatch = 0; + int[] selected = batch.selected; + for (int i = 0; i < batch.size; i++) { + if (selected[i] == matchs[numMatch]) { + matchHashMapResultIndices[numMatch] = matchHashMapResultIndices[i]; + numMatch++; + if (numMatch == matchCount) { + break; + } + } + } + System.arraycopy(batch.selected, 0, matchs, 0, matchCount); + } + } + // LOG.info("finishOuter" + + // " matchs[" + matchCount + "] " + intArrayToRangesString(matchs, matchCount) + + // " matchHashMapResultIndices " + Arrays.toString(Arrays.copyOfRange(matchHashMapResultIndices, 0, matchCount))); + + // Big table value expressions apply to ALL matching and non-matching rows. + if (bigTableValueExpressions != null) { + + System.arraycopy(nonSpills, 0, batch.selected, 0, nonSpillCount); + batch.size = nonSpillCount; + + for (VectorExpression ve: bigTableValueExpressions) { + ve.evaluate(batch); + } + } + + // Determine which rows are non matches by determining the delta between selected and + // matchs. + int[] noMatchs = scratch1; + int noMatchCount = 0; + if (matchCount < nonSpillCount) { + // Determine which rows are non matches. + int matchIndex = 0; + for (int i = 0; i < nonSpillCount; i++) { + int candidateIndex = nonSpills[i]; + if (matchIndex < matchCount && candidateIndex == matchs[matchIndex]) { + matchIndex++; + } else { + noMatchs[noMatchCount++] = candidateIndex; + } + } + } + // LOG.info("finishOuter" + + // " noMatchs[" + noMatchCount + "] " + intArrayToRangesString(noMatchs, noMatchCount)); + + + // When we generate results into the overflow batch, we may still end up with fewer rows + // in the big table batch. So, nulSel and the batch's selected array will be rebuilt with + // just the big table rows that need to be forwarded, minus any rows processed with the + // overflow batch. 
+ if (matchCount > 0) { + numSel = generateOuterHashMapMatchResults(batch, + matchs, matchHashMapResultIndices, matchCount, + hashMapResults, numSel); + } + + if (noMatchCount > 0) { + numSel = generateOuterHashMapNoMatchResults(batch, noMatchs, noMatchCount, numSel); + } + + if (spillCount > 0) { + spillHashMapBatch(batch, (VectorMapJoinHashTableResult[]) hashMapResults, + spills, spillHashMapResultIndices, spillCount); + } + + return numSel; + } + + /** + * Generate the matching outer join output results for one row of a vectorized row batch into + * the overflow batch. + * + * @param batch + * The big table batch. + * @param batchIndex + * Index of the big table row. + * @param hashMapResult + * The hash map result with the small table values. + */ + private void copyOuterHashMapResultToOverflow(VectorizedRowBatch batch, int batchIndex, + VectorMapJoinHashMapResult hashMapResult) throws HiveException, IOException { + + // if (hashMapResult.isCappedCountAvailable()) { + // LOG.info("copyOuterHashMapResultToOverflow cappedCount " + hashMapResult.cappedCount()); + // } + ByteSegmentRef byteSegmentRef = hashMapResult.first(); + while (byteSegmentRef != null) { + + // Copy the BigTable values into the overflow batch. Since the overflow batch may + // not get flushed here, we must copy by value. + if (bigTableRetainedVectorCopy != null) { + bigTableRetainedVectorCopy.copyByValue(batch, batchIndex, + overflowBatch, overflowBatch.size); + } + + // Reference the keys we just copied above. 
+ if (bigTableVectorCopyOuterKeys != null) { + bigTableVectorCopyOuterKeys.copyByReference(overflowBatch, overflowBatch.size, + overflowBatch, overflowBatch.size); + } + + if (smallTableVectorDeserializeRow != null) { + + byte[] bytes = byteSegmentRef.getBytes(); + int offset = (int) byteSegmentRef.getOffset(); + int length = byteSegmentRef.getLength(); + smallTableVectorDeserializeRow.setBytes(bytes, offset, length); + + smallTableVectorDeserializeRow.deserializeByValue(overflowBatch, overflowBatch.size); + } + + ++overflowBatch.size; + if (overflowBatch.size == VectorizedRowBatch.DEFAULT_SIZE) { + forwardOverflow(); + } + + byteSegmentRef = hashMapResult.next(); + } + // LOG.info("copyOuterHashMapResultToOverflow overflowBatch.size " + overflowBatch.size); + + } + + /** + * Generate the matching outer join output results for one vectorized row batch. + * + * For each matching row specified by parameter, get the one or more small table values and + * form join results. + * + * (Note: Since all matching and non-matching rows are selected and output for outer joins, + * we cannot use selected as the matching rows). + * + * @param batch + * The big table batch with any matching and any non matching rows both as + * selected in use. + * @param matchs + * A subset of the rows of the batch that are matches. + * @param matchHashMapResultIndices + * For each entry in matches, the index into the hashMapResult. + * @param matchSize + * Number of matches in matchs. + * @param hashMapResults + * The array of all hash map results for the batch. + * @param numSel + * The current count of rows in the rebuilding of the selected array. + * + * @return + * The new count of selected rows. 
+ */ + protected int generateOuterHashMapMatchResults(VectorizedRowBatch batch, + int[] matchs, int[] matchHashMapResultIndices, int matchSize, + VectorMapJoinHashMapResult[] hashMapResults, int numSel) + throws IOException, HiveException { + + int[] selected = batch.selected; + + if (onlyUseOverflowBatch) { + + // Do not use the big table batch for output results. Copy everything to the overflow batch. + + for (int i = 0; i < matchSize; i++) { + int batchIndex = matchs[i]; + + int hashMapResultIndex = matchHashMapResultIndices[i]; + VectorMapJoinHashMapResult hashMapResult = hashMapResults[hashMapResultIndex]; + + // TODO: We could try to look ahead and see equal keys and do a N x M generate. + + copyOuterHashMapResultToOverflow(batch, batchIndex, hashMapResult); + } + + // Unchanged (we did not remember anything in the selected array). + return numSel; + } + + // Attempt to generate result within big table batch. + + for (int i = 0; i < matchSize; i++) { + int batchIndex = matchs[i]; + + int hashMapResultIndex = matchHashMapResultIndices[i]; + VectorMapJoinHashMapResult hashMapResult = hashMapResults[hashMapResultIndex]; + + if (!hashMapResult.isSingleRow()) { + + // Multiple small table rows require use of the overflow batch. + copyOuterHashMapResultToOverflow(batch, batchIndex, hashMapResult); + } else { + + // Generate join result in big table batch. + ByteSegmentRef byteSegmentRef = hashMapResult.first(); + + if (bigTableVectorCopyOuterKeys != null) { + bigTableVectorCopyOuterKeys.copyByReference(batch, batchIndex, batch, batchIndex); + } + + if (smallTableVectorDeserializeRow != null) { + + byte[] bytes = byteSegmentRef.getBytes(); + int offset = (int) byteSegmentRef.getOffset(); + int length = byteSegmentRef.getLength(); + smallTableVectorDeserializeRow.setBytes(bytes, offset, length); + + smallTableVectorDeserializeRow.deserializeByValue(batch, batchIndex); + } + + // Remember this big table row was used for an output result. 
+ selected[numSel++] = batchIndex; + } + } + return numSel; + } + + /** + * Generate the non matching outer join output results for one vectorized row batch. + * + * For each non matching row specified by parameter, generate nulls for the small table results. + * + * @param batch + * The big table batch with any matching and any non matching rows both as + * selected in use. + * @param noMatchs + * A subset of the rows of the batch that are non matches. + * @param noMatchSize + * Number of non matches in noMatchs. + * @param numSel + * The current count of rows in the rebuilding of the selected array. + * + * @return + * The new count of selected rows. + */ + protected int generateOuterHashMapNoMatchResults(VectorizedRowBatch batch, int[] noMatchs, + int noMatchSize, int numSel) throws IOException, HiveException { + int[] selected = batch.selected; + + if (onlyUseOverflowBatch) { + + // Do not use the big table batch for output results. Copy everything to the overflow batch. + + for (int i = 0; i < noMatchSize; i++) { + int batchIndex = noMatchs[i]; + + // Copy the BigTable values into the overflow batch. Since the overflow batch may + // not get flushed here, we must copy by value. + if (bigTableRetainedVectorCopy != null) { + bigTableRetainedVectorCopy.copyByValue(batch, batchIndex, overflowBatch, overflowBatch.size); + } + + // Mark any scratch small table scratch columns that would normally receive a copy of the + // key as null, too. + for (int column : bigTableOuterKeyMapping.getOutputColumns()) { + ColumnVector colVector = overflowBatch.cols[column]; + colVector.noNulls = false; + colVector.isNull[overflowBatch.size] = true; + } + + // Small table values are set to null. 
+ for (int column : smallTableMapping.getOutputColumns()) { + ColumnVector colVector = overflowBatch.cols[column]; + colVector.noNulls = false; + colVector.isNull[overflowBatch.size] = true; + } + + ++overflowBatch.size; + if (overflowBatch.size == VectorizedRowBatch.DEFAULT_SIZE) { + forwardOverflow(); + } + } + + return numSel; + } + + // Generate result within big table batch with null small table results, using isRepeated + // if possible. + + if (numSel == 0) { + + // There were 0 matching rows -- so we can use the isRepeated optimization for the non + // matching rows. + + // Mark any scratch small table scratch columns that would normally receive a copy of the + // key as null and repeating. + for (int column : bigTableOuterKeyMapping.getOutputColumns()) { + ColumnVector colVector = batch.cols[column]; + colVector.isRepeating = true; + colVector.noNulls = false; + colVector.isNull[0] = true; + } + + // Small table values are set to null and repeating. + for (int column : smallTableMapping.getOutputColumns()) { + ColumnVector colVector = batch.cols[column]; + colVector.isRepeating = true; + colVector.noNulls = false; + colVector.isNull[0] = true; + } + + // Rebuild the selected array. + for (int i = 0; i < noMatchSize; i++) { + int batchIndex = noMatchs[i]; + selected[numSel++] = batchIndex; + } + } else { + + // Set null information in the small table results area. + + for (int i = 0; i < noMatchSize; i++) { + int batchIndex = noMatchs[i]; + + // Mark any scratch small table scratch columns that would normally receive a copy of the + // key as null, too. + for (int column : bigTableOuterKeyMapping.getOutputColumns()) { + ColumnVector colVector = batch.cols[column]; + colVector.noNulls = false; + colVector.isNull[batchIndex] = true; + } + + // Small table values are set to null. 
+ for (int column : smallTableMapping.getOutputColumns()) { + ColumnVector colVector = batch.cols[column]; + colVector.noNulls = false; + colVector.isNull[batchIndex] = true; + } + + selected[numSel++] = batchIndex; + } + } + return numSel; + } + + /** + * Generate the outer join output results for one vectorized row batch with a repeated key. + * + * Any filter expressions will apply now since hash map lookup for outer join is complete. + * + * @param batch + * The big table batch with any matching and any non matching rows both as + * selected in use. + * @param joinResult + * The hash map lookup result for the repeated key. + * @param hashMapResults + * The array of all hash map results for the batch. + * @param scratch1 + * Pre-allocated storage to internal use. + */ + public int finishOuterRepeated(VectorizedRowBatch batch, JoinUtil.JoinResult joinResult, + VectorMapJoinHashMapResult hashMapResult, int[] scratch1) + throws IOException, HiveException { + + int numSel = 0; + + if (joinResult == JoinUtil.JoinResult.MATCH && bigTableFilterExpressions.length > 0) { + + // Since it is repeated, the evaluation of the filter will knock the whole batch out. + // But since we are doing outer join, we want to keep non-matches. + + // First, remember selected; + int[] rememberSelected = scratch1; + int rememberBatchSize = batch.size; + if (batch.selectedInUse) { + System.arraycopy(batch.selected, 0, rememberSelected, 0, batch.size); + } + + // Filter. + for (VectorExpression ve : bigTableFilterExpressions) { + ve.evaluate(batch); + } + + // Convert a filter out to a non match. 
+ if (batch.size == 0) { + joinResult = JoinUtil.JoinResult.NOMATCH; + if (batch.selectedInUse) { + System.arraycopy(rememberSelected, 0, batch.selected, 0, rememberBatchSize); + // LOG.info("finishOuterRepeated batch #" + batchCounter + " filter out converted to no matchs " + + // Arrays.toString(Arrays.copyOfRange(batch.selected, 0, rememberBatchSize))); + } else { + // LOG.info("finishOuterRepeated batch #" + batchCounter + " filter out converted to no matchs batch size " + + // rememberBatchSize); + } + batch.size = rememberBatchSize; + } + } + + // LOG.info("finishOuterRepeated batch #" + batchCounter + " " + joinResult.name() + " batch.size " + batch.size); + switch (joinResult) { + case MATCH: + // Run our value expressions over whole batch. + if (bigTableValueExpressions != null) { + for(VectorExpression ve: bigTableValueExpressions) { + ve.evaluate(batch); + } + } + + // Use a common method applicable for inner and outer. + numSel = generateHashMapResultRepeatedAll(batch, hashMapResult); + break; + case SPILL: + // Whole batch is spilled. + spillBatchRepeated(batch, (VectorMapJoinHashTableResult) hashMapResult); + break; + case NOMATCH: + // Run our value expressions over whole batch. + if (bigTableValueExpressions != null) { + for(VectorExpression ve: bigTableValueExpressions) { + ve.evaluate(batch); + } + } + + numSel = generateOuterNullsRepeatedAll(batch); + break; + } + + return numSel; + } + + /** + * Generate the non-match outer join output results for the whole repeating vectorized + * row batch. + * + * Each row will get nulls for all small table values. + * + * @param batch + * The big table batch. + * @return + * The new count of selected rows. + */ + protected int generateOuterNullsRepeatedAll(VectorizedRowBatch batch) throws HiveException { + + int[] selected = batch.selected; + boolean selectedInUse = batch.selectedInUse; + + if (onlyUseOverflowBatch) { + + for (int i = 0; i < batch.size; i++) { + int index = (selectedInUse ? 
selected[i] : i); + + // Copy the BigTable values into the overflow batch. Since the overflow batch may + // not get flushed here, we must copy by value. + if (bigTableRetainedVectorCopy != null) { + bigTableRetainedVectorCopy.copyByValue(batch, index, overflowBatch, overflowBatch.size); + } + + // Mark any scratch small table scratch columns that would normally receive a copy of the key + // as null, too. + for (int column : bigTableOuterKeyMapping.getOutputColumns()) { + ColumnVector colVector = overflowBatch.cols[column]; + colVector.noNulls = false; + colVector.isNull[overflowBatch.size] = true; + } + + for (int column : smallTableMapping.getOutputColumns()) { + ColumnVector colVector = overflowBatch.cols[column]; + colVector.noNulls = false; + colVector.isNull[overflowBatch.size] = true; + } + + ++overflowBatch.size; + if (overflowBatch.size == VectorizedRowBatch.DEFAULT_SIZE) { + forwardOverflow(); + } + } + // LOG.info("generateOuterNullsRepeatedAll " + batch.size); + + // We only used the overflow batch. + return 0; + } + + // Generate result within big table batch using is repeated for null small table results. + + if (batch.selectedInUse) { + // The selected array is already filled in as we want it. + } else { + for (int i = 0; i < batch.size; i++) { + selected[i] = i; + } + batch.selectedInUse = true; + } + + for (int column : smallTableMapping.getOutputColumns()) { + ColumnVector colVector = batch.cols[column]; + colVector.noNulls = false; + colVector.isNull[0] = true; + colVector.isRepeating = true; + } + + // Mark any scratch small table scratch columns that would normally receive a copy of the key + // as null, too. 
+ for (int column : bigTableOuterKeyMapping.getOutputColumns()) { + ColumnVector colVector = batch.cols[column]; + colVector.noNulls = false; + colVector.isNull[0] = true; + colVector.isRepeating = true; + } + + // for (int i = 0; i < batch.size; i++) { + // int bigTableIndex = selected[i]; + // VectorizedBatchUtil.debugDisplayOneRow(batch, bigTableIndex, taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator generate generateOuterNullsRepeatedAll batch"); + // } + + return batch.size; + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java new file mode 100644 index 0000000..8f18672 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java @@ -0,0 +1,376 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.ql.exec.vector.mapjoin;

import java.util.Arrays;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.ql.exec.JoinUtil;
import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;

// Single-Column Long hash table import.
import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinLongHashMap;

// Single-Column Long specific imports.
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;

/**
 * Specialized operator for a vectorized map join that is an OUTER join on a
 * Single-Column Long key, using a long hash map.
 *
 * For integer keys the hash map may additionally supply a min/max key range that
 * lets whole batches (or individual rows) be short-circuited to NOMATCH without
 * a hash table probe.
 */
public class VectorMapJoinOuterLongOperator extends VectorMapJoinOuterGenerateResultOperator {

  private static final long serialVersionUID = 1L;
  private static final Log LOG = LogFactory.getLog(VectorMapJoinOuterLongOperator.class.getName());
  private static final String CLASS_NAME = VectorMapJoinOuterLongOperator.class.getName();

  // (none)

  // The above members are initialized by the constructor and must not be
  // transient.
  //---------------------------------------------------------------------------

  // The hash map for this specialized class.
  private transient VectorMapJoinLongHashMap hashMap;

  //---------------------------------------------------------------------------
  // Single-Column Long specific members.
  //

  // For integers, we have optional min/max filtering on the hash table keys.
  private transient boolean useMinMax;
  private transient long min;
  private transient long max;

  // The column number for this one column join specialization.
  private transient int singleJoinColumn;

  //---------------------------------------------------------------------------
  // Pass-thru constructors.
  //

  public VectorMapJoinOuterLongOperator() {
    super();
  }

  public VectorMapJoinOuterLongOperator(VectorizationContext vContext, OperatorDesc conf)
      throws HiveException {
    super(vContext, conf);
  }

  //---------------------------------------------------------------------------
  // Process Single-Column Long Outer Join on a vectorized row batch.
  //

  /**
   * Process one big-table vectorized row batch for the outer join.
   *
   * Rows that match are joined in-place; non-matching rows are emitted with NULL
   * small-table columns; rows whose hash partition was spilled (Hybrid Grace) are
   * routed to the spill path.  The batch's selected array is rebuilt in place with
   * the rows to forward.
   *
   * @param row the incoming {@link VectorizedRowBatch}
   * @param tag the alias tag of the input
   * @throws HiveException wrapping any failure during lookup or result generation
   */
  @Override
  public void process(Object row, int tag) throws HiveException {

    try {
      VectorizedRowBatch batch = (VectorizedRowBatch) row;

      alias = (byte) tag;

      if (needCommonSetup) {
        // Our one time process method initialization.
        commonSetup(batch);

        /*
         * Initialize Single-Column Long members for this specialized class.
         */

        singleJoinColumn = bigTableKeyColumnMap[0];

        needCommonSetup = false;
      }

      if (needHashTableSetup) {
        // Setup our hash table specialization.  It will be the first time the process
        // method is called, or after a Hybrid Grace reload.

        /*
         * Get our Single-Column Long hash map information for this specialized class.
         */

        hashMap = (VectorMapJoinLongHashMap) vectorMapJoinHashTable;
        useMinMax = hashMap.useMinMax();
        if (useMinMax) {
          min = hashMap.min();
          max = hashMap.max();
        }

        needHashTableSetup = false;
      }

      batchCounter++;

      // For outer join, DO NOT apply filters yet.  It is incorrect for outer join to
      // apply the filter before hash table matching.

      final int inputLogicalSize = batch.size;

      if (inputLogicalSize == 0) {
        if (LOG.isDebugEnabled()) {
          LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
        }
        return;
      }

      // Perform any key expressions.  Results will go into scratch columns.
      if (bigTableKeyExpressions != null) {
        for (VectorExpression ve : bigTableKeyExpressions) {
          ve.evaluate(batch);
        }
      }

      // We rebuild in-place the selected array with rows destined to be forwarded.
      int numSel = 0;

      /*
       * Single-Column Long specific declarations.
       */

      // The one join column for this specialized class.
      LongColumnVector joinColVector = (LongColumnVector) batch.cols[singleJoinColumn];
      long[] vector = joinColVector.vector;

      /*
       * Single-Column Long check for repeating.
       */

      // Check single column for repeating.
      boolean allKeyInputColumnsRepeating = joinColVector.isRepeating;

      if (allKeyInputColumnsRepeating) {

        /*
         * Repeating.
         */

        // All key input columns are repeating.  Generate key once.  Lookup once.
        // Since the key is repeated, we must use entry 0 regardless of selectedInUse.

        /*
         * Single-Column Long specific repeated lookup.
         */

        JoinUtil.JoinResult joinResult;
        if (!joinColVector.noNulls && joinColVector.isNull[0]) {
          // Null key is no match for whole batch.
          joinResult = JoinUtil.JoinResult.NOMATCH;
        } else {
          // Handle *repeated* join key, if found.
          long key = vector[0];
          if (useMinMax && (key < min || key > max)) {
            // Out of range for whole batch.
            joinResult = JoinUtil.JoinResult.NOMATCH;
          } else {
            joinResult = hashMap.lookup(key, hashMapResults[0]);
          }
        }

        /*
         * Common repeated join result processing.
         */

        if (LOG.isDebugEnabled()) {
          LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
        }
        numSel = finishOuterRepeated(batch, joinResult, hashMapResults[0], scratch1);
      } else {

        /*
         * NOT Repeating.
         */

        if (LOG.isDebugEnabled()) {
          LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
        }

        int[] selected = batch.selected;
        boolean selectedInUse = batch.selectedInUse;

        // For outer join we must apply the filter after match and cause some matches to become
        // non-matches, we do not track non-matches here.  Instead we remember all non spilled rows
        // and compute non matches later in finishOuter.
        int hashMapResultCount = 0;
        int matchCount = 0;
        int nonSpillCount = 0;
        int spillCount = 0;

        /*
         * Single-Column Long specific variables.
         */

        long saveKey = 0;

        // We optimize performance by only looking up the first key in a series of equal keys.
        boolean haveSaveKey = false;
        JoinUtil.JoinResult saveJoinResult = JoinUtil.JoinResult.NOMATCH;

        // Logical loop over the rows in the batch since the batch may have selected in use.
        for (int logical = 0; logical < inputLogicalSize; logical++) {
          int batchIndex = (selectedInUse ? selected[logical] : logical);

          /*
           * Single-Column Long outer null detection.
           */

          boolean isNull = !joinColVector.noNulls && joinColVector.isNull[batchIndex];

          if (isNull) {

            // Have that the NULL does not interfere with the current equal key series, if there
            // is one.  We do not set saveJoinResult.
            //
            //    Let a current MATCH equal key series keep going, or
            //    Let a current SPILL equal key series keep going, or
            //    Let a current NOMATCH keep not matching.

            // Remember non-matches for Outer Join.
            nonSpills[nonSpillCount++] = batchIndex;
            // LOG.debug(CLASS_NAME + " logical " + logical + " batchIndex " + batchIndex + " NULL");
          } else {

            /*
             * Single-Column Long outer get key.
             */

            long currentKey = vector[batchIndex];

            /*
             * Equal key series checking.
             */

            if (!haveSaveKey || currentKey != saveKey) {
              // New key.

              if (haveSaveKey) {
                // Move on with our count(s).
                switch (saveJoinResult) {
                case MATCH:
                case SPILL:
                  hashMapResultCount++;
                  break;
                case NOMATCH:
                  break;
                }
              }

              // Regardless of our matching result, we keep that information to make multiple use
              // of it for a possible series of equal keys.
              haveSaveKey = true;

              /*
               * Single-Column Long specific save key.
               */

              saveKey = currentKey;

              /*
               * Single-Column Long specific lookup key.
               */

              if (useMinMax && (currentKey < min || currentKey > max)) {
                // Key out of range for whole hash table.
                saveJoinResult = JoinUtil.JoinResult.NOMATCH;
              } else {
                saveJoinResult = hashMap.lookup(currentKey, hashMapResults[hashMapResultCount]);
              }
              // LOG.debug(CLASS_NAME + " logical " + logical + " batchIndex " + batchIndex + " New Key " + saveJoinResult.name());
            } else {
              // LOG.debug(CLASS_NAME + " logical " + logical + " batchIndex " + batchIndex + " Key Continues " + saveJoinResult.name());
            }

            /*
             * Common outer join result processing.
             */

            switch (saveJoinResult) {
            case MATCH:
              matchs[matchCount] = batchIndex;
              matchHashMapResultIndices[matchCount] = hashMapResultCount;
              matchCount++;
              nonSpills[nonSpillCount++] = batchIndex;
              break;

            case SPILL:
              spills[spillCount] = batchIndex;
              spillHashMapResultIndices[spillCount] = hashMapResultCount;
              spillCount++;
              break;

            case NOMATCH:
              nonSpills[nonSpillCount++] = batchIndex;
              // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH duplicate");
              break;
            }
          }
        }

        if (haveSaveKey) {
          // Account for last equal key sequence.
          switch (saveJoinResult) {
          case MATCH:
          case SPILL:
            hashMapResultCount++;
            break;
          case NOMATCH:
            break;
          }
        }

        if (LOG.isDebugEnabled()) {
          LOG.debug(CLASS_NAME + " batch #" + batchCounter +
              " matchs " + intArrayToRangesString(matchs, matchCount) +
              " matchHashMapResultIndices " + intArrayToRangesString(matchHashMapResultIndices, matchCount) +
              " nonSpills " + intArrayToRangesString(nonSpills, nonSpillCount) +
              " spills " + intArrayToRangesString(spills, spillCount) +
              " spillHashMapResultIndices " + intArrayToRangesString(spillHashMapResultIndices, spillCount) +
              " hashMapResults " + Arrays.toString(Arrays.copyOfRange(hashMapResults, 0, hashMapResultCount)));
        }

        // We will generate results for all matching and non-matching rows.
        // Note that scratch1 is undefined at this point -- it's preallocated storage.
        numSel = finishOuter(batch,
            matchs, matchHashMapResultIndices, matchCount,
            nonSpills, nonSpillCount,
            spills, spillHashMapResultIndices, spillCount,
            hashMapResults, hashMapResultCount,
            scratch1);
      }

      batch.selectedInUse = true;
      batch.size = numSel;

      if (batch.size > 0) {
        // Forward any remaining selected rows.
        forwardBigTableBatch(batch);
      }

    } catch (Exception e) {
      // NOTE: the original code had a separate catch (IOException e) clause with an
      // identical body; since both wrapped the cause in HiveException, a single
      // catch is equivalent and clearer.
      throw new HiveException(e);
    }
  }
}
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.ql.exec.vector.mapjoin;

import java.util.Arrays;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.ql.exec.JoinUtil;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;

// Multi-Key hash table import.
import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashMap;

// Multi-Key specific imports.
import org.apache.hadoop.hive.ql.exec.vector.VectorSerializeRow;
import org.apache.hadoop.hive.serde2.ByteStream.Output;
import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableSerializeWrite;

/**
 * Specialized operator for a vectorized map join that is an OUTER join on a
 * Multi-Key, using a bytes hash map.
 *
 * Each big-table key is serialized to BinarySortable bytes and looked up in the
 * hash map; equal-key runs are detected by comparing the serialized bytes so
 * each distinct key is probed only once.
 */
public class VectorMapJoinOuterMultiKeyOperator extends VectorMapJoinOuterGenerateResultOperator {

  private static final long serialVersionUID = 1L;
  private static final Log LOG = LogFactory.getLog(VectorMapJoinOuterMultiKeyOperator.class.getName());
  private static final String CLASS_NAME = VectorMapJoinOuterMultiKeyOperator.class.getName();

  // (none)

  // The above members are initialized by the constructor and must not be
  // transient.
  //---------------------------------------------------------------------------

  // The hash map for this specialized class.
  private transient VectorMapJoinBytesHashMap hashMap;

  //---------------------------------------------------------------------------
  // Multi-Key specific members.
  //

  // Object that can take a set of columns in row in a vectorized row batch and serialize it.
  private transient VectorSerializeRow keyVectorSerializeWrite;

  // The BinarySortable serialization of the current key.
  private transient Output currentKeyOutput;

  // The BinarySortable serialization of the saved key for a possible series of equal keys.
  private transient Output saveKeyOutput;

  //---------------------------------------------------------------------------
  // Pass-thru constructors.
  //

  public VectorMapJoinOuterMultiKeyOperator() {
    super();
  }

  public VectorMapJoinOuterMultiKeyOperator(VectorizationContext vContext, OperatorDesc conf)
      throws HiveException {
    super(vContext, conf);
  }

  //---------------------------------------------------------------------------
  // Process Multi-Key Outer Join on a vectorized row batch.
  //

  /**
   * Process one big-table vectorized row batch for the multi-key outer join.
   *
   * Matching rows are joined in-place, non-matching rows get NULL small-table
   * columns, and spilled-partition rows (Hybrid Grace) go to the spill path.
   * The batch's selected array is rebuilt in place with the rows to forward.
   *
   * @param row the incoming {@link VectorizedRowBatch}
   * @param tag the alias tag of the input
   * @throws HiveException wrapping any failure during serialization, lookup,
   *         or result generation
   */
  @Override
  public void process(Object row, int tag) throws HiveException {

    try {
      VectorizedRowBatch batch = (VectorizedRowBatch) row;

      alias = (byte) tag;

      if (needCommonSetup) {
        // Our one time process method initialization.
        commonSetup(batch);

        /*
         * Initialize Multi-Key members for this specialized class.
         */

        keyVectorSerializeWrite = new VectorSerializeRow(
            new BinarySortableSerializeWrite(bigTableKeyColumnMap.length));
        keyVectorSerializeWrite.init(bigTableKeyTypeNames, bigTableKeyColumnMap);

        currentKeyOutput = new Output();
        saveKeyOutput = new Output();

        needCommonSetup = false;
      }

      if (needHashTableSetup) {
        // Setup our hash table specialization.  It will be the first time the process
        // method is called, or after a Hybrid Grace reload.

        /*
         * Get our Multi-Key hash map information for this specialized class.
         */

        hashMap = (VectorMapJoinBytesHashMap) vectorMapJoinHashTable;

        needHashTableSetup = false;
      }

      batchCounter++;

      // For outer join, DO NOT apply filters yet.  It is incorrect for outer join to
      // apply the filter before hash table matching.

      final int inputLogicalSize = batch.size;

      if (inputLogicalSize == 0) {
        if (LOG.isDebugEnabled()) {
          LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
        }
        return;
      }

      // Perform any key expressions.  Results will go into scratch columns.
      if (bigTableKeyExpressions != null) {
        for (VectorExpression ve : bigTableKeyExpressions) {
          ve.evaluate(batch);
        }
      }

      // We rebuild in-place the selected array with rows destined to be forwarded.
      int numSel = 0;

      /*
       * Multi-Key specific declarations.
       */

      // None.

      /*
       * Multi-Key check for repeating.
       */

      // If all BigTable input columns to key expressions are isRepeating, then
      // calculate key once; lookup once.
      // Also determine if any nulls are present since for a join that means no match.
      boolean allKeyInputColumnsRepeating;
      boolean someKeyInputColumnIsNull = false; // Only valid if allKeyInputColumnsRepeating is true.
      if (bigTableKeyColumnMap.length == 0) {
        allKeyInputColumnsRepeating = false;
      } else {
        allKeyInputColumnsRepeating = true;
        for (int i = 0; i < bigTableKeyColumnMap.length; i++) {
          ColumnVector colVector = batch.cols[bigTableKeyColumnMap[i]];
          if (!colVector.isRepeating) {
            allKeyInputColumnsRepeating = false;
            break;
          }
          if (!colVector.noNulls && colVector.isNull[0]) {
            someKeyInputColumnIsNull = true;
          }
        }
      }

      if (allKeyInputColumnsRepeating) {

        /*
         * Repeating.
         */

        // All key input columns are repeating.  Generate key once.  Lookup once.
        // Since the key is repeated, we must use entry 0 regardless of selectedInUse.

        /*
         * Multi-Key specific repeated lookup.
         */

        JoinUtil.JoinResult joinResult;
        if (someKeyInputColumnIsNull) {
          // Any null key column is no match for whole batch.
          joinResult = JoinUtil.JoinResult.NOMATCH;
        } else {

          // All key input columns are repeating.  Generate key once.  Lookup once.
          keyVectorSerializeWrite.setOutput(currentKeyOutput);
          keyVectorSerializeWrite.serializeWrite(batch, 0);
          byte[] keyBytes = currentKeyOutput.getData();
          int keyLength = currentKeyOutput.getLength();
          joinResult = hashMap.lookup(keyBytes, 0, keyLength, hashMapResults[0]);
        }

        /*
         * Common repeated join result processing.
         */

        if (LOG.isDebugEnabled()) {
          LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
        }
        numSel = finishOuterRepeated(batch, joinResult, hashMapResults[0], scratch1);
      } else {

        /*
         * NOT Repeating.
         */

        if (LOG.isDebugEnabled()) {
          LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
        }

        int[] selected = batch.selected;
        boolean selectedInUse = batch.selectedInUse;

        // For outer join we must apply the filter after match and cause some matches to become
        // non-matches, we do not track non-matches here.  Instead we remember all non spilled rows
        // and compute non matches later in finishOuter.
        int hashMapResultCount = 0;
        int matchCount = 0;
        int nonSpillCount = 0;
        int spillCount = 0;

        /*
         * Multi-Key specific variables.
         */

        Output temp;

        // We optimize performance by only looking up the first key in a series of equal keys.
        boolean haveSaveKey = false;
        JoinUtil.JoinResult saveJoinResult = JoinUtil.JoinResult.NOMATCH;

        // Logical loop over the rows in the batch since the batch may have selected in use.
        for (int logical = 0; logical < inputLogicalSize; logical++) {
          int batchIndex = (selectedInUse ? selected[logical] : logical);

          /*
           * Multi-Key outer null detection.
           */

          // Generate binary sortable key for current row in vectorized row batch.
          keyVectorSerializeWrite.setOutput(currentKeyOutput);
          boolean isNull = keyVectorSerializeWrite.serializeWrite(batch, batchIndex);

          if (isNull) {

            // Have that the NULL does not interfere with the current equal key series, if there
            // is one.  We do not set saveJoinResult.
            //
            //    Let a current MATCH equal key series keep going, or
            //    Let a current SPILL equal key series keep going, or
            //    Let a current NOMATCH keep not matching.

            // Remember non-matches for Outer Join.
            nonSpills[nonSpillCount++] = batchIndex;
            // LOG.debug(CLASS_NAME + " logical " + logical + " batchIndex " + batchIndex + " NULL");
          } else {

            /*
             * Multi-Key outer get key.
             */

            // Generated earlier to get possible null(s).

            /*
             * Equal key series checking.
             */

            if (!haveSaveKey || !saveKeyOutput.arraysEquals(currentKeyOutput)) {

              // New key.

              if (haveSaveKey) {
                // Move on with our count(s).
                switch (saveJoinResult) {
                case MATCH:
                case SPILL:
                  hashMapResultCount++;
                  break;
                case NOMATCH:
                  break;
                }
              }

              // Regardless of our matching result, we keep that information to make multiple use
              // of it for a possible series of equal keys.
              haveSaveKey = true;

              /*
               * Multi-Key specific save key.
               */

              // Swap the output buffers rather than copying bytes: the current key
              // becomes the saved key and the old save buffer is reused next row.
              temp = saveKeyOutput;
              saveKeyOutput = currentKeyOutput;
              currentKeyOutput = temp;

              /*
               * Multi-Key specific lookup key.
               */

              byte[] keyBytes = saveKeyOutput.getData();
              int keyLength = saveKeyOutput.getLength();
              saveJoinResult = hashMap.lookup(keyBytes, 0, keyLength, hashMapResults[hashMapResultCount]);
              // LOG.debug(CLASS_NAME + " logical " + logical + " batchIndex " + batchIndex + " New Key " + saveJoinResult.name());
            } else {
              // LOG.debug(CLASS_NAME + " logical " + logical + " batchIndex " + batchIndex + " Key Continues " + saveJoinResult.name());
            }

            /*
             * Common outer join result processing.
             */

            switch (saveJoinResult) {
            case MATCH:
              matchs[matchCount] = batchIndex;
              matchHashMapResultIndices[matchCount] = hashMapResultCount;
              matchCount++;
              nonSpills[nonSpillCount++] = batchIndex;
              break;

            case SPILL:
              spills[spillCount] = batchIndex;
              spillHashMapResultIndices[spillCount] = hashMapResultCount;
              spillCount++;
              break;

            case NOMATCH:
              nonSpills[nonSpillCount++] = batchIndex;
              // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH duplicate");
              break;
            }
          }
        }

        if (haveSaveKey) {
          // Account for last equal key sequence.
          switch (saveJoinResult) {
          case MATCH:
          case SPILL:
            hashMapResultCount++;
            break;
          case NOMATCH:
            break;
          }
        }

        if (LOG.isDebugEnabled()) {
          LOG.debug(CLASS_NAME + " batch #" + batchCounter +
              " matchs " + intArrayToRangesString(matchs, matchCount) +
              " matchHashMapResultIndices " + intArrayToRangesString(matchHashMapResultIndices, matchCount) +
              " nonSpills " + intArrayToRangesString(nonSpills, nonSpillCount) +
              " spills " + intArrayToRangesString(spills, spillCount) +
              " spillHashMapResultIndices " + intArrayToRangesString(spillHashMapResultIndices, spillCount) +
              " hashMapResults " + Arrays.toString(Arrays.copyOfRange(hashMapResults, 0, hashMapResultCount)));
        }

        // We will generate results for all matching and non-matching rows.
        // Note that scratch1 is undefined at this point -- it's preallocated storage.
        numSel = finishOuter(batch,
            matchs, matchHashMapResultIndices, matchCount,
            nonSpills, nonSpillCount,
            spills, spillHashMapResultIndices, spillCount,
            hashMapResults, hashMapResultCount,
            scratch1);
      }

      batch.selectedInUse = true;
      batch.size = numSel;

      if (batch.size > 0) {
        // Forward any remaining selected rows.
        forwardBigTableBatch(batch);
      }

    } catch (Exception e) {
      // NOTE: the original code had a separate catch (IOException e) clause with an
      // identical body; both wrapped the cause in HiveException, so one catch suffices.
      throw new HiveException(e);
    }
  }
}
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.ql.exec.vector.mapjoin;

import java.util.Arrays;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.ql.exec.JoinUtil;
import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;

// Single-Column String hash table import.
import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashMap;

// Single-Column String specific imports.
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;

/**
 * Specialized operator for a vectorized map join that is an OUTER join on a
 * Single-Column String key, using a bytes hash map.
 *
 * Equal-key runs within a batch are detected by byte comparison against the
 * previous key so each distinct key is probed only once.
 */
public class VectorMapJoinOuterStringOperator extends VectorMapJoinOuterGenerateResultOperator {

  private static final long serialVersionUID = 1L;
  private static final Log LOG = LogFactory.getLog(VectorMapJoinOuterStringOperator.class.getName());
  private static final String CLASS_NAME = VectorMapJoinOuterStringOperator.class.getName();

  // (none)

  // The above members are initialized by the constructor and must not be
  // transient.
  //---------------------------------------------------------------------------

  // The hash map for this specialized class.
  private transient VectorMapJoinBytesHashMap hashMap;

  //---------------------------------------------------------------------------
  // Single-Column String specific members.
  //

  // The column number for this one column join specialization.
  private transient int singleJoinColumn;

  //---------------------------------------------------------------------------
  // Pass-thru constructors.
  //

  public VectorMapJoinOuterStringOperator() {
    super();
  }

  public VectorMapJoinOuterStringOperator(VectorizationContext vContext, OperatorDesc conf)
      throws HiveException {
    super(vContext, conf);
  }

  //---------------------------------------------------------------------------
  // Process Single-Column String Outer Join on a vectorized row batch.
  //

  /**
   * Process one big-table vectorized row batch for the string-key outer join.
   *
   * Matching rows are joined in-place, non-matching rows get NULL small-table
   * columns, and spilled-partition rows (Hybrid Grace) go to the spill path.
   * The batch's selected array is rebuilt in place with the rows to forward.
   *
   * @param row the incoming {@link VectorizedRowBatch}
   * @param tag the alias tag of the input
   * @throws HiveException wrapping any failure during lookup or result generation
   */
  @Override
  public void process(Object row, int tag) throws HiveException {

    try {
      VectorizedRowBatch batch = (VectorizedRowBatch) row;

      alias = (byte) tag;

      if (needCommonSetup) {
        // Our one time process method initialization.
        commonSetup(batch);

        /*
         * Initialize Single-Column String members for this specialized class.
         */

        singleJoinColumn = bigTableKeyColumnMap[0];

        needCommonSetup = false;
      }

      if (needHashTableSetup) {
        // Setup our hash table specialization.  It will be the first time the process
        // method is called, or after a Hybrid Grace reload.

        /*
         * Get our Single-Column String hash map information for this specialized class.
         */

        hashMap = (VectorMapJoinBytesHashMap) vectorMapJoinHashTable;

        needHashTableSetup = false;
      }

      batchCounter++;

      // For outer join, DO NOT apply filters yet.  It is incorrect for outer join to
      // apply the filter before hash table matching.

      final int inputLogicalSize = batch.size;

      if (inputLogicalSize == 0) {
        if (LOG.isDebugEnabled()) {
          LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
        }
        return;
      }

      // Perform any key expressions.  Results will go into scratch columns.
      if (bigTableKeyExpressions != null) {
        for (VectorExpression ve : bigTableKeyExpressions) {
          ve.evaluate(batch);
        }
      }

      // We rebuild in-place the selected array with rows destined to be forwarded.
      int numSel = 0;

      /*
       * Single-Column String specific declarations.
       */

      // The one join column for this specialized class.
      BytesColumnVector joinColVector = (BytesColumnVector) batch.cols[singleJoinColumn];
      byte[][] vector = joinColVector.vector;
      int[] start = joinColVector.start;
      int[] length = joinColVector.length;

      /*
       * Single-Column String check for repeating.
       */

      // Check single column for repeating.
      boolean allKeyInputColumnsRepeating = joinColVector.isRepeating;

      if (allKeyInputColumnsRepeating) {

        /*
         * Repeating.
         */

        // All key input columns are repeating.  Generate key once.  Lookup once.
        // Since the key is repeated, we must use entry 0 regardless of selectedInUse.

        /*
         * Single-Column String specific repeated lookup.
         */

        JoinUtil.JoinResult joinResult;
        if (!joinColVector.noNulls && joinColVector.isNull[0]) {
          // Null key is no match for whole batch.
          joinResult = JoinUtil.JoinResult.NOMATCH;
        } else {
          // Handle *repeated* join key, if found.
          byte[] keyBytes = vector[0];
          int keyStart = start[0];
          int keyLength = length[0];
          joinResult = hashMap.lookup(keyBytes, keyStart, keyLength, hashMapResults[0]);
        }

        /*
         * Common repeated join result processing.
         */

        if (LOG.isDebugEnabled()) {
          LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
        }
        numSel = finishOuterRepeated(batch, joinResult, hashMapResults[0], scratch1);
      } else {

        /*
         * NOT Repeating.
         */

        if (LOG.isDebugEnabled()) {
          LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
        }

        int[] selected = batch.selected;
        boolean selectedInUse = batch.selectedInUse;

        // For outer join we must apply the filter after match and cause some matches to become
        // non-matches, we do not track non-matches here.  Instead we remember all non spilled rows
        // and compute non matches later in finishOuter.
        int hashMapResultCount = 0;
        int matchCount = 0;
        int nonSpillCount = 0;
        int spillCount = 0;

        /*
         * Single-Column String specific variables.
         */

        // Batch index of the saved key; the key bytes stay valid for the lifetime
        // of the batch, so remembering the index avoids copying bytes.
        int saveKeyBatchIndex = -1;

        // We optimize performance by only looking up the first key in a series of equal keys.
        boolean haveSaveKey = false;
        JoinUtil.JoinResult saveJoinResult = JoinUtil.JoinResult.NOMATCH;

        // Logical loop over the rows in the batch since the batch may have selected in use.
        for (int logical = 0; logical < inputLogicalSize; logical++) {
          int batchIndex = (selectedInUse ? selected[logical] : logical);

          /*
           * Single-Column String outer null detection.
           */

          boolean isNull = !joinColVector.noNulls && joinColVector.isNull[batchIndex];

          if (isNull) {

            // Have that the NULL does not interfere with the current equal key series, if there
            // is one.  We do not set saveJoinResult.
            //
            //    Let a current MATCH equal key series keep going, or
            //    Let a current SPILL equal key series keep going, or
            //    Let a current NOMATCH keep not matching.

            // Remember non-matches for Outer Join.
            nonSpills[nonSpillCount++] = batchIndex;
            // LOG.debug(CLASS_NAME + " logical " + logical + " batchIndex " + batchIndex + " NULL");
          } else {

            /*
             * Single-Column String outer get key.
             */

            // Implicit -- use batchIndex.

            /*
             * Equal key series checking.
             */

            if (!haveSaveKey ||
                StringExpr.compare(vector[saveKeyBatchIndex], start[saveKeyBatchIndex], length[saveKeyBatchIndex],
                    vector[batchIndex], start[batchIndex], length[batchIndex]) != 0) {
              // New key.

              if (haveSaveKey) {
                // Move on with our count(s).
                switch (saveJoinResult) {
                case MATCH:
                case SPILL:
                  hashMapResultCount++;
                  break;
                case NOMATCH:
                  break;
                }
              }

              // Regardless of our matching result, we keep that information to make multiple use
              // of it for a possible series of equal keys.
              haveSaveKey = true;

              /*
               * Single-Column String specific save key.
               */

              saveKeyBatchIndex = batchIndex;

              /*
               * Single-Column String specific lookup key.
               * (Fixed copy/paste comment: this is the String, not Long, variant.)
               */

              byte[] keyBytes = vector[batchIndex];
              int keyStart = start[batchIndex];
              int keyLength = length[batchIndex];

              saveJoinResult = hashMap.lookup(keyBytes, keyStart, keyLength, hashMapResults[hashMapResultCount]);
              // LOG.debug(CLASS_NAME + " logical " + logical + " batchIndex " + batchIndex + " New Key " + saveJoinResult.name());
            } else {
              // LOG.debug(CLASS_NAME + " logical " + logical + " batchIndex " + batchIndex + " Key Continues " + saveJoinResult.name());
            }

            /*
             * Common outer join result processing.
             */

            switch (saveJoinResult) {
            case MATCH:
              matchs[matchCount] = batchIndex;
              matchHashMapResultIndices[matchCount] = hashMapResultCount;
              matchCount++;
              nonSpills[nonSpillCount++] = batchIndex;
              break;

            case SPILL:
              spills[spillCount] = batchIndex;
              spillHashMapResultIndices[spillCount] = hashMapResultCount;
              spillCount++;
              break;

            case NOMATCH:
              nonSpills[nonSpillCount++] = batchIndex;
              // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH duplicate");
              break;
            }
          }
        }

        if (haveSaveKey) {
          // Account for last equal key sequence.
          switch (saveJoinResult) {
          case MATCH:
          case SPILL:
            hashMapResultCount++;
            break;
          case NOMATCH:
            break;
          }
        }

        if (LOG.isDebugEnabled()) {
          LOG.debug(CLASS_NAME + " batch #" + batchCounter +
              " matchs " + intArrayToRangesString(matchs, matchCount) +
              " matchHashMapResultIndices " + intArrayToRangesString(matchHashMapResultIndices, matchCount) +
              " nonSpills " + intArrayToRangesString(nonSpills, nonSpillCount) +
              " spills " + intArrayToRangesString(spills, spillCount) +
              " spillHashMapResultIndices " + intArrayToRangesString(spillHashMapResultIndices, spillCount) +
              " hashMapResults " + Arrays.toString(Arrays.copyOfRange(hashMapResults, 0, hashMapResultCount)));
        }

        // We will generate results for all matching and non-matching rows.
        // Note that scratch1 is undefined at this point -- it's preallocated storage.
        numSel = finishOuter(batch,
            matchs, matchHashMapResultIndices, matchCount,
            nonSpills, nonSpillCount,
            spills, spillHashMapResultIndices, spillCount,
            hashMapResults, hashMapResultCount,
            scratch1);
      }

      batch.selectedInUse = true;
      batch.size = numSel;

      if (batch.size > 0) {
        // Forward any remaining selected rows.
        forwardBigTableBatch(batch);
      }

    } catch (Exception e) {
      // NOTE: the original code had a separate catch (IOException e) clause with an
      // identical body; both wrapped the cause in HiveException, so one catch suffices.
      throw new HiveException(e);
    }
  }
}
+ */ +public class VectorMapJoinRowBytesContainer { + + private static final Log LOG = LogFactory.getLog(VectorMapJoinRowBytesContainer.class); + + private File parentFile; + private File tmpFile; + + // We buffer in a org.apache.hadoop.hive.serde2.ByteStream.Output since that is what + // is used by VectorSerializeRow / SerializeWrite. Periodically, we flush this buffer + // to disk. + private Output output; + private int rowBeginPos; + private static final int OUTPUT_SIZE = 4096; + private static final int THRESHOLD = 8 * (OUTPUT_SIZE / 10); + private static final int INPUT_SIZE = 4096; + + private FileOutputStream fileOutputStream; + + private boolean isOpen; + + private byte[] readBuffer; + private byte[] largeRowBuffer; + private int readOffset; + private int readLength; + + private int readNextCount; + private int readNextIndex; + + private static final int MAX_READS = 256; + private byte[][] readNextBytes; + private int readNextOffsets[]; + private int readNextLengths[]; + + private byte[] currentBytes; + private int currentOffset; + private int currentLength; + + private long totalWriteLength; + private long totalReadLength; + + private FileInputStream fileInputStream; + + public VectorMapJoinRowBytesContainer() { + output = new Output(); + readBuffer = new byte[INPUT_SIZE]; + readNextBytes = new byte[MAX_READS][]; + readNextOffsets = new int[MAX_READS]; + readNextLengths = new int[MAX_READS]; + isOpen = false; + totalWriteLength = 0; + totalReadLength = 0; + } + + private void setupOutputFileStreams() throws IOException { + + parentFile = File.createTempFile("bytes-container", ""); + if (parentFile.delete() && parentFile.mkdir()) { + parentFile.deleteOnExit(); + } + + tmpFile = File.createTempFile("BytesContainer", ".tmp", parentFile); + LOG.info("BytesContainer created temp file " + tmpFile.getAbsolutePath()); + tmpFile.deleteOnExit(); + + fileOutputStream = new FileOutputStream(tmpFile); + } + + private void initFile() { + try { + 
setupOutputFileStreams(); + } catch (IOException e) { + throw new RuntimeException("Failed to create temporary output file on disk", e); + } + } + + public Output getOuputForRowBytes() { + if (!isOpen) { + initFile(); + isOpen = true; + } + // Reserve space for the int length. + output.reserve(4); + rowBeginPos = output.getLength(); + return output; + } + + public void finishRow() throws IOException { + int length = output.getLength() - rowBeginPos; + output.writeInt(rowBeginPos - 4, length); + if (output.getLength() > THRESHOLD) { + fileOutputStream.write(output.getData(), 0, output.getLength()); + totalWriteLength += output.getLength(); + output.reset(); + } + } + + public void prepareForReading() throws IOException { + if (!isOpen) { + return; + } + if (output.getLength() > 0) { + fileOutputStream.write(output.getData(), 0, output.getLength()); + totalWriteLength += output.getLength(); + fileOutputStream.flush(); + output.reset(); + } + if (fileInputStream != null) { + fileInputStream.close(); + } + fileInputStream = new FileInputStream(tmpFile); + readNextIndex = 0; + readNextCount = 0; + } + + private int readInt() { + int value = (((readBuffer[readOffset] & 0xFF) << 24) | + ((readBuffer[readOffset + 1] & 0xFF) << 16) | + ((readBuffer[readOffset + 2] & 0xFF) << 8) | + ((readBuffer[readOffset + 3] & 0xFF))); + readOffset += 4; + return value; + } + + // Call when nextReadIndex == nextReadCount. + private void bufferedRead() throws IOException { + + // Reset for reading. + readNextIndex = 0; + + // Reset for filling. + readNextCount = 0; + + if (readOffset < readLength) { + // Move unprocessed remainder to beginning of buffer. 
+ int unprocessLength = readLength - readOffset; + System.arraycopy(readBuffer, readOffset, readBuffer, 0, unprocessLength); + + int maxReadLength = readBuffer.length - unprocessLength; + int partialReadLength = fileInputStream.read(readBuffer, unprocessLength, maxReadLength); + if (partialReadLength == -1) { + partialReadLength = 0; + } + totalReadLength += partialReadLength; + readLength = unprocessLength + partialReadLength; + readOffset = 0; + } else { + readOffset = 0; + readLength = fileInputStream.read(readBuffer, 0, readBuffer.length); + if (readLength == -1) { + readLength = 0; + } + totalReadLength += readLength; + } + if (readLength == 0) { + return; + } + if (readLength < 0) { + throw new IOException("Negative read length"); + } + + // Get length word. + if (readLength < 4) { + throw new IOException("Expecting 4 byte length"); + } + + while (true) { + // Use Input class to read length. + int saveReadOffset = readOffset; + int rowLength = readInt(); + if (rowLength < 0) { + throw new IOException("Negative row length"); + } + int remainingLength = readLength - readOffset; + if (remainingLength < rowLength) { + if (readNextCount > 0) { + // Leave this one for the next round. + readOffset = saveReadOffset; + break; + } + + // Buffer needed to bridge. 
+ if (largeRowBuffer == null || largeRowBuffer.length < rowLength) { + int newLargeBufferLength = Math.max(Integer.highestOneBit(rowLength) << 1, INPUT_SIZE); + largeRowBuffer = new byte[newLargeBufferLength]; + } + System.arraycopy(readBuffer, readOffset, largeRowBuffer, 0, remainingLength); + int expectedPartialLength = rowLength - remainingLength; + int partialReadLength = fileInputStream.read(largeRowBuffer, remainingLength, expectedPartialLength); + if (partialReadLength == -1) { + throw new IOException("Unexpected EOF (total write length " + totalWriteLength + + ", total read length " + totalReadLength + ", read length " + + expectedPartialLength + ")"); + } + + if (expectedPartialLength != partialReadLength) { + throw new IOException("Unable to read a complete row of length " + rowLength + + " (total write length " + totalWriteLength + + ", total read length " + totalReadLength + ", read length " + + expectedPartialLength + ", actual length " + partialReadLength + ")"); + } + totalReadLength += partialReadLength; + + readNextBytes[readNextCount] = largeRowBuffer; + readNextOffsets[readNextCount] = 0; + readNextLengths[readNextCount] = rowLength; + + // Indicate we used the last row's bytes for large buffer. + readOffset = readLength; + readNextCount++; + break; + } + + readNextBytes[readNextCount] = readBuffer; + readNextOffsets[readNextCount] = readOffset; + readNextLengths[readNextCount] = rowLength; + readOffset += rowLength; + readNextCount++; + + if (readNextCount >= readNextBytes.length){ + break; + } + if (readLength - readOffset < 4) { + // Handle in next round. + break; + } + } + } + + public boolean readNext() throws IOException { + if (!isOpen) { + return false; + } + if (readNextIndex >= readNextCount) { + bufferedRead(); + // Any more left? 
+ if (readNextIndex >= readNextCount) { + return false; + } + } + + currentBytes = readNextBytes[readNextIndex]; + currentOffset = readNextOffsets[readNextIndex]; + currentLength = readNextLengths[readNextIndex]; + + readNextIndex++; + return true; + } + + public byte[] currentBytes() { + return currentBytes; + } + + public int currentOffset() { + return currentOffset; + } + + public int currentLength() { + return currentLength; + } + + public void clear() throws IOException { + if (fileInputStream != null) { + try { + fileInputStream.close(); + } catch (Throwable ignored) { + } + fileInputStream = null; + } + if (fileOutputStream != null) { + try { + fileOutputStream.close(); + } catch (Throwable ignored) { + } + fileOutputStream = null; + } + try { + FileUtil.fullyDelete(parentFile); + } catch (Throwable ignored) { + } + parentFile = null; + tmpFile = null; + isOpen = false; + totalWriteLength = 0; + } + +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMap.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMap.java new file mode 100644 index 0000000..0796406 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMap.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashMap; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMapResult; +import org.apache.hadoop.io.BytesWritable; + +/* + * An single byte array value hash map optimized for vector map join. + */ +public abstract class VectorMapJoinFastBytesHashMap + extends VectorMapJoinFastBytesHashTable + implements VectorMapJoinBytesHashMap { + + private static final Log LOG = LogFactory.getLog(VectorMapJoinFastBytesHashMap.class); + + private VectorMapJoinFastValueStore valueStore; + + @Override + public VectorMapJoinHashMapResult createHashMapResult() { + return new VectorMapJoinFastValueStore.HashMapResult(); + } + + @Override + public void assignSlot(int slot, byte[] keyBytes, int keyStart, int keyLength, + long hashCode, boolean isNewKey, BytesWritable currentValue) { + + byte[] valueBytes = currentValue.getBytes(); + int valueLength = currentValue.getLength(); + + int tripleIndex = 3 * slot; + if (isNewKey) { + // First entry. 
+ slotTriples[tripleIndex] = keyStore.add(keyBytes, keyStart, keyLength); + slotTriples[tripleIndex + 1] = hashCode; + slotTriples[tripleIndex + 2] = valueStore.addFirst(valueBytes, 0, valueLength); + // LOG.info("VectorMapJoinFastBytesHashMap add first keyRefWord " + Long.toHexString(slotTriples[tripleIndex]) + " hashCode " + Long.toHexString(slotTriples[tripleIndex + 1]) + " valueRefWord " + Long.toHexString(slotTriples[tripleIndex + 2])); + // Do NOT increment keysAssigned here -- VectorMapJoinFastBytesHashTable.add() already counts new keys; incrementing in both places double-counts and skews the resize threshold. + } else { + // Add another value. + // LOG.info("VectorMapJoinFastBytesHashMap add more keyRefWord " + Long.toHexString(slotTriples[tripleIndex]) + " hashCode " + Long.toHexString(slotTriples[tripleIndex + 1]) + " valueRefWord " + Long.toHexString(slotTriples[tripleIndex + 2])); + slotTriples[tripleIndex + 2] = valueStore.addMore(slotTriples[tripleIndex + 2], valueBytes, 0, valueLength); + // LOG.info("VectorMapJoinFastBytesHashMap add more new valueRefWord " + Long.toHexString(slotTriples[tripleIndex + 2])); + } + } + + @Override + public JoinUtil.JoinResult lookup(byte[] keyBytes, int keyStart, int keyLength, VectorMapJoinHashMapResult hashMapResult) { + VectorMapJoinFastValueStore.HashMapResult optimizedHashMapResult = + (VectorMapJoinFastValueStore.HashMapResult) hashMapResult; + + optimizedHashMapResult.forget(); + + long hashCode = VectorMapJoinFastBytesHashUtil.hashKey(keyBytes, keyStart, keyLength); + long valueRefWord = findReadSlot(keyBytes, keyStart, keyLength, hashCode); + JoinUtil.JoinResult joinResult; + if (valueRefWord == -1) { + joinResult = JoinUtil.JoinResult.NOMATCH; + } else { + // LOG.info("VectorMapJoinFastBytesHashMap lookup hashCode " + Long.toHexString(hashCode) + " valueRefWord " + Long.toHexString(valueRefWord) + " (valueStore != null) " + (valueStore != null)); + + optimizedHashMapResult.set(valueStore, valueRefWord); + + joinResult = JoinUtil.JoinResult.MATCH; + } + + optimizedHashMapResult.setJoinResult(joinResult); + + return joinResult; + } + + public 
VectorMapJoinFastBytesHashMap( + int initialCapacity, float loadFactor, int writeBuffersSize, long memUsage) { + super(initialCapacity, loadFactor, writeBuffersSize, memUsage); + + valueStore = new VectorMapJoinFastValueStore(writeBuffersSize); + + // Share the same write buffers with our value store. + keyStore = new VectorMapJoinFastKeyStore(valueStore.writeBuffers()); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMultiSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMultiSet.java new file mode 100644 index 0000000..d685c22 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMultiSet.java @@ -0,0 +1,93 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + + import org.apache.commons.logging.Log; + import org.apache.commons.logging.LogFactory; + import org.apache.hadoop.hive.ql.exec.JoinUtil; + import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashMultiSet; + import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMultiSetResult; + import org.apache.hadoop.io.BytesWritable; + + /* + * A single byte array value hash multi-set optimized for vector map join. + */ + public abstract class VectorMapJoinFastBytesHashMultiSet + extends VectorMapJoinFastBytesHashTable + implements VectorMapJoinBytesHashMultiSet { + + private static final Log LOG = LogFactory.getLog(VectorMapJoinFastBytesHashMultiSet.class); + + @Override + public VectorMapJoinHashMultiSetResult createHashMultiSetResult() { + return new VectorMapJoinFastHashMultiSet.HashMultiSetResult(); + } + + @Override + public void assignSlot(int slot, byte[] keyBytes, int keyStart, int keyLength, + long hashCode, boolean isNewKey, BytesWritable currentValue) { + + int tripleIndex = 3 * slot; + if (isNewKey) { + // First entry. + slotTriples[tripleIndex] = keyStore.add(keyBytes, keyStart, keyLength); + slotTriples[tripleIndex + 1] = hashCode; + slotTriples[tripleIndex + 2] = 1; // Count. + // LOG.info("VectorMapJoinFastBytesHashMap add first keyRefWord " + Long.toHexString(slotTriples[tripleIndex]) + " hashCode " + Long.toHexString(slotTriples[tripleIndex + 1]) + " valueRefWord " + Long.toHexString(slotTriples[tripleIndex + 2])); + // Do NOT increment keysAssigned here -- VectorMapJoinFastBytesHashTable.add() already counts new keys; incrementing in both places double-counts and skews the resize threshold. + } else { + // Add another value. 
+ // LOG.info("VectorMapJoinFastBytesHashMap add more keyRefWord " + Long.toHexString(slotTriples[tripleIndex]) + " hashCode " + Long.toHexString(slotTriples[tripleIndex + 1]) + " valueRefWord " + Long.toHexString(slotTriples[tripleIndex + 2])); + slotTriples[tripleIndex + 2]++; + } + } + + @Override + public JoinUtil.JoinResult contains(byte[] keyBytes, int keyStart, int keyLength, + VectorMapJoinHashMultiSetResult hashMultiSetResult) { + + VectorMapJoinFastHashMultiSet.HashMultiSetResult optimizedHashMultiSetResult = + (VectorMapJoinFastHashMultiSet.HashMultiSetResult) hashMultiSetResult; + + optimizedHashMultiSetResult.forget(); + + long hashCode = VectorMapJoinFastBytesHashUtil.hashKey(keyBytes, keyStart, keyLength); + long count = findReadSlot(keyBytes, keyStart, keyLength, hashCode); + JoinUtil.JoinResult joinResult; + if (count == -1) { + joinResult = JoinUtil.JoinResult.NOMATCH; + } else { + + optimizedHashMultiSetResult.set(count); + + joinResult = JoinUtil.JoinResult.MATCH; + } + + optimizedHashMultiSetResult.setJoinResult(joinResult); + + return joinResult; + } + + public VectorMapJoinFastBytesHashMultiSet( + int initialCapacity, float loadFactor, int writeBuffersSize, long memUsage) { + super(initialCapacity, loadFactor, writeBuffersSize, memUsage); + + keyStore = new VectorMapJoinFastKeyStore(writeBuffersSize); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashSet.java new file mode 100644 index 0000000..9f20fdc --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashSet.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
 The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + + import org.apache.commons.logging.Log; + import org.apache.commons.logging.LogFactory; + import org.apache.hadoop.hive.ql.exec.JoinUtil; + import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashSet; + import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashSetResult; + import org.apache.hadoop.io.BytesWritable; + + /* + * A single byte array value hash set optimized for vector map join. + */ + public abstract class VectorMapJoinFastBytesHashSet + extends VectorMapJoinFastBytesHashTable + implements VectorMapJoinBytesHashSet { + + private static final Log LOG = LogFactory.getLog(VectorMapJoinFastBytesHashSet.class); + + @Override + public VectorMapJoinHashSetResult createHashSetResult() { + return new VectorMapJoinFastHashSet.HashSetResult(); + } + + @Override + public void assignSlot(int slot, byte[] keyBytes, int keyStart, int keyLength, + long hashCode, boolean isNewKey, BytesWritable currentValue) { + + int tripleIndex = 3 * slot; + if (isNewKey) { + // First entry. 
+ slotTriples[tripleIndex] = keyStore.add(keyBytes, keyStart, keyLength); + slotTriples[tripleIndex + 1] = hashCode; + slotTriples[tripleIndex + 2] = 1; // Existence + // Do NOT increment keysAssigned here -- VectorMapJoinFastBytesHashTable.add() already counts new keys; incrementing in both places double-counts and skews the resize threshold. + } + } + + @Override + public JoinUtil.JoinResult contains(byte[] keyBytes, int keyStart, int keyLength, + VectorMapJoinHashSetResult hashSetResult) { + + VectorMapJoinFastHashSet.HashSetResult optimizedHashSetResult = + (VectorMapJoinFastHashSet.HashSetResult) hashSetResult; + + optimizedHashSetResult.forget(); + + long hashCode = VectorMapJoinFastBytesHashUtil.hashKey(keyBytes, keyStart, keyLength); + long existence = findReadSlot(keyBytes, keyStart, keyLength, hashCode); + JoinUtil.JoinResult joinResult; + if (existence == -1) { + joinResult = JoinUtil.JoinResult.NOMATCH; + } else { + joinResult = JoinUtil.JoinResult.MATCH; + } + + optimizedHashSetResult.setJoinResult(joinResult); + + return joinResult; + } + + public VectorMapJoinFastBytesHashSet( + int initialCapacity, float loadFactor, int writeBuffersSize, long memUsage) { + super(initialCapacity, loadFactor, writeBuffersSize, memUsage); + + keyStore = new VectorMapJoinFastKeyStore(writeBuffersSize); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java new file mode 100644 index 0000000..594a77f --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java @@ -0,0 +1,221 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashTable; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.io.BytesWritable; + +import com.google.common.annotations.VisibleForTesting; + +/* + * An single byte array value hash map optimized for vector map join. + */ +public abstract class VectorMapJoinFastBytesHashTable + extends VectorMapJoinFastHashTable + implements VectorMapJoinBytesHashTable { + + private static final Log LOG = LogFactory.getLog(VectorMapJoinFastBytesHashTable.class); + + protected VectorMapJoinFastKeyStore keyStore; + + private BytesWritable testKeyBytesWritable; + private BytesWritable testValueBytesWritable; + + @Override + public void putRow(BytesWritable currentKey, BytesWritable currentValue) throws HiveException, IOException { + // No deserialization of key(s) here -- just get reference to bytes. 
+ byte[] keyBytes = currentKey.getBytes(); + int keyLength = currentKey.getLength(); + add(keyBytes, 0, keyLength, currentValue); + } + + @VisibleForTesting + public void putRow(byte[] currentKey, byte[] currentValue) throws HiveException, IOException { + if (testKeyBytesWritable == null) { + testKeyBytesWritable = new BytesWritable(); + testValueBytesWritable = new BytesWritable(); + } + testKeyBytesWritable.set(currentKey, 0, currentKey.length); + testValueBytesWritable.set(currentValue, 0, currentValue.length); + putRow(testKeyBytesWritable, testValueBytesWritable); + } + + protected abstract void assignSlot(int slot, byte[] keyBytes, int keyStart, int keyLength, + long hashCode, boolean isNewKey, BytesWritable currentValue); + + public void add(byte[] keyBytes, int keyStart, int keyLength, BytesWritable currentValue) { + + if (resizeThreshold <= keysAssigned) { + expandAndRehash(); + } + + long hashCode = VectorMapJoinFastBytesHashUtil.hashKey(keyBytes, keyStart, keyLength); + int intHashCode = (int) hashCode; + int slot = (intHashCode & logicalHashBucketMask); + long probeSlot = slot; + int i = 0; + boolean isNewKey; + while (true) { + int tripleIndex = 3 * slot; + if (slotTriples[tripleIndex] == 0) { + // LOG.info("VectorMapJoinFastBytesHashMap findWriteSlot slot " + slot + " tripleIndex " + tripleIndex + " empty"); + isNewKey = true; + break; + } + if (hashCode == slotTriples[tripleIndex + 1] && + keyStore.equalKey(slotTriples[tripleIndex], keyBytes, keyStart, keyLength)) { + // LOG.info("VectorMapJoinFastBytesHashMap findWriteSlot slot " + slot + " tripleIndex " + tripleIndex + " existing"); + isNewKey = false; + break; + } + // TODO + ++metricPutConflict; + // Some other key (collision) - keep probing. 
+ probeSlot += (++i); + slot = (int) (probeSlot & logicalHashBucketMask); + } + + if (largestNumberOfSteps < i) { + if (LOG.isDebugEnabled()) { + LOG.debug("Probed " + i + " slots (the longest so far) to find space"); + } + largestNumberOfSteps = i; + // debugDumpKeyProbe(keyOffset, keyLength, hashCode, slot); + } + + assignSlot(slot, keyBytes, keyStart, keyLength, hashCode, isNewKey, currentValue); + + if (isNewKey) { + keysAssigned++; + } + } + + private void expandAndRehash() { + + int newLogicalHashBucketCount = logicalHashBucketCount * 2; + int newLogicalHashBucketMask = newLogicalHashBucketCount - 1; + int newMetricPutConflict = 0; + int newLargestNumberOfSteps = 0; + + int newSlotTripleArraySize = newLogicalHashBucketCount * 3; + long[] newSlotTriples = new long[newSlotTripleArraySize]; + + for (int slot = 0; slot < logicalHashBucketCount; slot++) { + int tripleIndex = slot * 3; + long keyRef = slotTriples[tripleIndex]; + if (keyRef != 0) { + long hashCode = slotTriples[tripleIndex + 1]; + long valueRef = slotTriples[tripleIndex + 2]; + + // Copy to new slot table. + int intHashCode = (int) hashCode; + int newSlot = intHashCode & newLogicalHashBucketMask; + long newProbeSlot = newSlot; + int newTripleIndex; + int i = 0; + while (true) { + newTripleIndex = newSlot * 3; + long newKeyRef = newSlotTriples[newTripleIndex]; + if (newKeyRef == 0) { + break; + } + ++newMetricPutConflict; + // Some other key (collision) - keep probing. + newProbeSlot += (++i); + newSlot = (int)(newProbeSlot & newLogicalHashBucketMask); + } + + if (newLargestNumberOfSteps < i) { + if (LOG.isDebugEnabled()) { + LOG.debug("Probed " + i + " slots (the longest so far) to find space"); + } + newLargestNumberOfSteps = i; + // debugDumpKeyProbe(keyOffset, keyLength, hashCode, slot); + } + + // Use old value reference word. 
+ // LOG.info("VectorMapJoinFastLongHashTable expandAndRehash key " + tableKey + " slot " + newSlot + " newPairIndex " + newPairIndex + " empty slot (i = " + i + ")"); + + newSlotTriples[newTripleIndex] = keyRef; + newSlotTriples[newTripleIndex + 1] = hashCode; + newSlotTriples[newTripleIndex + 2] = valueRef; + } + } + + slotTriples = newSlotTriples; + logicalHashBucketCount = newLogicalHashBucketCount; + logicalHashBucketMask = newLogicalHashBucketMask; + metricPutConflict = newMetricPutConflict; + largestNumberOfSteps = newLargestNumberOfSteps; + resizeThreshold = (int)(logicalHashBucketCount * loadFactor); + metricExpands++; + // LOG.info("VectorMapJoinFastLongHashTable expandAndRehash new logicalHashBucketCount " + logicalHashBucketCount + " resizeThreshold " + resizeThreshold + " metricExpands " + metricExpands); + } + + protected long findReadSlot(byte[] keyBytes, int keyStart, int keyLength, long hashCode) { + + int intHashCode = (int) hashCode; + int slot = (intHashCode & logicalHashBucketMask); + long probeSlot = slot; + int i = 0; + while (true) { + int tripleIndex = slot * 3; + // LOG.info("VectorMapJoinFastBytesHashMap findReadSlot slot keyRefWord " + Long.toHexString(slotTriples[tripleIndex]) + " hashCode " + Long.toHexString(hashCode) + " entry hashCode " + Long.toHexString(slotTriples[tripleIndex + 1]) + " valueRefWord " + Long.toHexString(slotTriples[tripleIndex + 2])); + if (slotTriples[tripleIndex] != 0 && hashCode == slotTriples[tripleIndex + 1]) { + // Finally, verify the key bytes match. + + if (keyStore.equalKey(slotTriples[tripleIndex], keyBytes, keyStart, keyLength)) { + return slotTriples[tripleIndex + 2]; + } + } + // Some other key (collision) - keep probing. + probeSlot += (++i); + if (i > largestNumberOfSteps) { + // We know we never went that far when we were inserting. + return -1; + } + slot = (int)(probeSlot & logicalHashBucketMask); + } + } + + /* + * The hash table slots. 
For a bytes key hash table, each slot is 3 longs and the array is + * 3X sized. + * + * The slot triple is 1) a non-zero reference word to the key bytes, 2) the key hash code, and + * 3) a non-zero reference word to the first value bytes. + */ + protected long[] slotTriples; + + private void allocateBucketArray() { + int slotTripleArraySize = 3 * logicalHashBucketCount; + slotTriples = new long[slotTripleArraySize]; + } + + public VectorMapJoinFastBytesHashTable( + int initialCapacity, float loadFactor, int writeBuffersSize, long memUsage) { + super(initialCapacity, loadFactor, writeBuffersSize, memUsage); + allocateBucketArray(); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashUtil.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashUtil.java new file mode 100644 index 0000000..28f7357 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashUtil.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +import org.apache.hadoop.hive.serde2.WriteBuffers; + +public class VectorMapJoinFastBytesHashUtil { + + public static long hashKey(byte[] bytes, int start, int length) { + return WriteBuffers.murmurHash(bytes, start, length); + } + + public static String displayBytes(byte[] bytes, int start, int length) { + StringBuilder sb = new StringBuilder(); + for (int i = start; i < start + length; i++) { + char ch = (char) bytes[i]; + if (ch < ' ' || ch > '~') { + sb.append(String.format("\\%03d", (int) (bytes[i] & 0xff))); + } else { + sb.append(ch); + } + } + return sb.toString(); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashMap.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashMap.java new file mode 100644 index 0000000..b37247c --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashMap.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMap; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMapResult; + +public abstract class VectorMapJoinFastHashMap + extends VectorMapJoinFastHashTable + implements VectorMapJoinHashMap { + + @Override + public VectorMapJoinHashMapResult createHashMapResult() { + return new VectorMapJoinFastValueStore.HashMapResult(); + } + + public VectorMapJoinFastHashMap( + boolean isOuterJoin, + int initialCapacity, float loadFactor, int writeBuffersSize, long memUsage) { + super(initialCapacity, loadFactor, writeBuffersSize, memUsage); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashMultiSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashMultiSet.java new file mode 100644 index 0000000..5569f6e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashMultiSet.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMultiSet; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMultiSetResult; + +public abstract class VectorMapJoinFastHashMultiSet + extends VectorMapJoinFastHashTable implements VectorMapJoinHashMultiSet { + + @Override + public VectorMapJoinHashMultiSetResult createHashMultiSetResult() { + return new HashMultiSetResult(); + } + + public static class HashMultiSetResult extends VectorMapJoinHashMultiSetResult { + + HashMultiSetResult() { + super(); + } + + public void set(long count) { + this.count = count; + } + } + + public VectorMapJoinFastHashMultiSet( + boolean isOuterJoin, + int initialCapacity, float loadFactor, int writeBuffersSize, long memUsage) { + super(initialCapacity, loadFactor, writeBuffersSize, memUsage); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashSet.java new file mode 100644 index 0000000..0738df3 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashSet.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashSet; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashSetResult; + +public abstract class VectorMapJoinFastHashSet + extends VectorMapJoinFastHashTable implements VectorMapJoinHashSet { + + @Override + public VectorMapJoinHashSetResult createHashSetResult() { + return new HashSetResult(); + } + + public static class HashSetResult extends VectorMapJoinHashSetResult { + + HashSetResult() { + super(); + } + } + + public VectorMapJoinFastHashSet( + boolean isOuterJoin, + int initialCapacity, float loadFactor, int writeBuffersSize, long memUsage) { + super(initialCapacity, loadFactor, writeBuffersSize, memUsage); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTable.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTable.java new file mode 100644 index 0000000..33e34fa --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTable.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTable; + +public abstract class VectorMapJoinFastHashTable implements VectorMapJoinHashTable { + public static final Log LOG = LogFactory.getLog(VectorMapJoinFastHashTable.class); + + protected int logicalHashBucketCount; + protected int logicalHashBucketMask; + + protected float loadFactor; + protected int writeBuffersSize; + protected long memUsage; + + protected int metricPutConflict; + protected int largestNumberOfSteps; + protected int keysAssigned; + protected int resizeThreshold; + protected int metricExpands; + + private static void validateCapacity(long capacity) { + if (Long.bitCount(capacity) != 1) { + throw new AssertionError("Capacity must be a power of two"); + } + if (capacity <= 0) { + throw new AssertionError("Invalid capacity " + capacity); + } + } + + private static int nextHighestPowerOfTwo(int v) { + return Integer.highestOneBit(v) << 1; + } + + public VectorMapJoinFastHashTable( + int initialCapacity, float loadFactor, int writeBuffersSize, long memUsage) { + + initialCapacity = (Long.bitCount(initialCapacity) == 1) + ? 
initialCapacity : nextHighestPowerOfTwo(initialCapacity); + + validateCapacity(initialCapacity); + + logicalHashBucketCount = initialCapacity; + logicalHashBucketMask = logicalHashBucketCount - 1; + resizeThreshold = (int)(logicalHashBucketCount * loadFactor); + + this.loadFactor = loadFactor; + this.writeBuffersSize = writeBuffersSize; + this.memUsage = memUsage; + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTableLoader.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTableLoader.java new file mode 100644 index 0000000..e83ee8b --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTableLoader.java @@ -0,0 +1,111 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.MapJoinOperator; +import org.apache.hadoop.hive.ql.exec.MapredContext; +import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; +import org.apache.hadoop.hive.ql.exec.persistence.HashMapWrapper; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainerSerDe; +import org.apache.hadoop.hive.ql.exec.tez.TezContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.MapJoinDesc; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.tez.runtime.api.Input; +import org.apache.tez.runtime.api.LogicalInput; +import org.apache.tez.runtime.library.api.KeyValueReader; + +/** + * HashTableLoader for Tez constructs the hashtable from records read from + * a broadcast edge. 
+ */ +public class VectorMapJoinFastHashTableLoader implements org.apache.hadoop.hive.ql.exec.HashTableLoader { + + private static final Log LOG = LogFactory.getLog(VectorMapJoinFastHashTableLoader.class.getName()); + + private Configuration hconf; + protected MapJoinDesc desc; + private TezContext tezContext; + + @Override + public void init(ExecMapperContext context, MapredContext mrContext, + Configuration hconf, MapJoinOperator joinOp) { + this.tezContext = (TezContext) mrContext; + this.hconf = hconf; + this.desc = joinOp.getConf(); + } + + @Override + public void load(MapJoinTableContainer[] mapJoinTables, + MapJoinTableContainerSerDe[] mapJoinTableSerdes, long memUsage) + throws HiveException { + + Map parentToInput = desc.getParentToInput(); + Map parentKeyCounts = desc.getParentKeyCounts(); + + for (int pos = 0; pos < mapJoinTables.length; pos++) { + if (pos == desc.getPosBigTable()) { + continue; + } + + String inputName = parentToInput.get(pos); + LogicalInput input = tezContext.getInput(inputName); + + try { + input.start(); + tezContext.getTezProcessorContext().waitForAnyInputReady( + Collections. singletonList(input)); + } catch (Exception e) { + throw new HiveException(e); + } + + try { + KeyValueReader kvReader = (KeyValueReader) input.getReader(); + + Long keyCountObj = parentKeyCounts.get(pos); + long keyCount = (keyCountObj == null) ? 
-1 : keyCountObj.longValue(); + + VectorMapJoinFastTableContainer VectorMapJoinFastTableContainer = + new VectorMapJoinFastTableContainer(desc, hconf, keyCount, memUsage); + + while (kvReader.next()) { + VectorMapJoinFastTableContainer.putRow(kvReader); + } + + VectorMapJoinFastTableContainer.seal(); + mapJoinTables[pos] = (MapJoinTableContainer) VectorMapJoinFastTableContainer; + + } catch (IOException e) { + throw new HiveException(e); + } catch (SerDeException e) { + throw new HiveException(e); + } catch (Exception e) { + throw new HiveException(e); + } + } + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastIntHashUtil.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastIntHashUtil.java new file mode 100644 index 0000000..a818cb2 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastIntHashUtil.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +public class VectorMapJoinFastIntHashUtil { + + public static int hashKey(int key) { + key = ~key + (key << 15); // key = (key << 15) - key - 1; + key = key ^ (key >>> 12); + key = key + (key << 2); + key = key ^ (key >>> 4); + key = key * 2057; // key = (key + (key << 3)) + (key << 11); + key = key ^ (key >>> 16); + return key; + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastKeyStore.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastKeyStore.java new file mode 100644 index 0000000..6f7615d --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastKeyStore.java @@ -0,0 +1,158 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.serde2.WriteBuffers; + +// Optimized for sequential key lookup. 
+ +public class VectorMapJoinFastKeyStore { + + private static final Log LOG = LogFactory.getLog(VectorMapJoinFastKeyStore.class.getName()); + + private WriteBuffers writeBuffers; + + private WriteBuffers.ByteSegmentRef byteSegmentRef; + private WriteBuffers.Position readPos; + + /** + * Bit-length fields within a 64-bit (long) key reference. + * + * Lowest field: An absolute byte offset to the key in the WriteBuffers. + * + * Next field: For short keys, the length of the key. Otherwise, a special constant + * indicating a big key whose length is stored with the key. + * + * Last field: an always on bit to ensure the key reference is non-zero when the offset and + * length are zero. + */ + + /* + * The absolute offset to the beginning of the key within the WriteBuffers. + */ + private final class AbsoluteKeyOffset { + private static final int bitLength = 40; + private static final long allBitsOn = (((long) 1) << bitLength) - 1; + private static final long bitMask = allBitsOn; + + // Make it a power of 2 by backing down (i.e. the -2). + private static final long maxSize = ((long) 1) << (bitLength - 2); + } + + /* + * The small key length. + * + * If the key is big (i.e. length >= allBitsOn), then the key length is stored in the + * WriteBuffers. + */ + private final class SmallKeyLength { + private static final int bitLength = 20; + private static final int allBitsOn = (1 << bitLength) - 1; + private static final int threshold = allBitsOn; // Lower this for big key testing. + private static final int bitShift = AbsoluteKeyOffset.bitLength; + private static final long bitMask = ((long) allBitsOn) << bitShift; + private static final long allBitsOnBitShifted = ((long) allBitsOn) << bitShift; + } + + /* + * An always on bit to ensure the key reference is non-zero. 
+ */ + private final class IsNonZeroFlag { + private static final int bitShift = SmallKeyLength.bitShift + SmallKeyLength.bitLength;; + private static final long flagOnMask = ((long) 1) << bitShift; + } + + public long add(byte[] keyBytes, int keyStart, int keyLength) { + boolean isKeyLengthBig = (keyLength >= SmallKeyLength.threshold); + + long absoluteKeyOffset = writeBuffers.getWritePoint(); + if (isKeyLengthBig) { + writeBuffers.writeVInt(keyLength); + } + writeBuffers.write(keyBytes, keyStart, keyLength); + + long keyRefWord = IsNonZeroFlag.flagOnMask; + if (isKeyLengthBig) { + keyRefWord |= SmallKeyLength.allBitsOnBitShifted; + } else { + keyRefWord |= ((long) keyLength) << SmallKeyLength.bitShift; + } + keyRefWord |= absoluteKeyOffset; + + // LOG.info("VectorMapJoinFastKeyStore add keyLength " + keyLength + " absoluteKeyOffset " + absoluteKeyOffset + " keyRefWord " + Long.toHexString(keyRefWord)); + return keyRefWord; + } + + public boolean equalKey(long keyRefWord, byte[] keyBytes, int keyStart, int keyLength) { + + int storedKeyLengthLength = + (int) ((keyRefWord & SmallKeyLength.bitMask) >> SmallKeyLength.bitShift); + boolean isKeyLengthSmall = (storedKeyLengthLength != SmallKeyLength.allBitsOn); + + // LOG.info("VectorMapJoinFastKeyStore equalKey keyLength " + keyLength + " isKeyLengthSmall " + isKeyLengthSmall + " storedKeyLengthLength " + storedKeyLengthLength + " keyRefWord " + Long.toHexString(keyRefWord)); + + if (isKeyLengthSmall && storedKeyLengthLength != keyLength) { + return false; + } + long absoluteKeyOffset = + (keyRefWord & AbsoluteKeyOffset.bitMask); + + writeBuffers.setReadPoint(absoluteKeyOffset, readPos); + if (!isKeyLengthSmall) { + // Read the big key length we wrote with the key. + storedKeyLengthLength = writeBuffers.readVInt(readPos); + if (storedKeyLengthLength != keyLength) { + // LOG.info("VectorMapJoinFastKeyStore equalKey no match big length"); + return false; + } + } + + // Our reading is positioned to the key. 
+ writeBuffers.getByteSegmentRefToCurrent(byteSegmentRef, keyLength, readPos); + + byte[] currentBytes = byteSegmentRef.getBytes(); + int currentStart = (int) byteSegmentRef.getOffset(); + + for (int i = 0; i < keyLength; i++) { + if (currentBytes[currentStart + i] != keyBytes[keyStart + i]) { + // LOG.info("VectorMapJoinFastKeyStore equalKey no match on bytes"); + return false; + } + } + // LOG.info("VectorMapJoinFastKeyStore equalKey match on bytes"); + return true; + } + + public VectorMapJoinFastKeyStore(int writeBuffersSize) { + writeBuffers = new WriteBuffers(writeBuffersSize, AbsoluteKeyOffset.maxSize); + + byteSegmentRef = new WriteBuffers.ByteSegmentRef(); + readPos = new WriteBuffers.Position(); + } + + public VectorMapJoinFastKeyStore(WriteBuffers writeBuffers) { + // TODO: Check if maximum size compatible with AbsoluteKeyOffset.maxSize. + this.writeBuffers = writeBuffers; + + byteSegmentRef = new WriteBuffers.ByteSegmentRef(); + readPos = new WriteBuffers.Position(); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashMap.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashMap.java new file mode 100644 index 0000000..3a0b380 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashMap.java @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMapResult; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinLongHashMap; +import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKeyType; +import org.apache.hadoop.io.BytesWritable; + +/* + * An single long value map optimized for vector map join. + */ +public class VectorMapJoinFastLongHashMap + extends VectorMapJoinFastLongHashTable + implements VectorMapJoinLongHashMap { + + public static final Log LOG = LogFactory.getLog(VectorMapJoinFastLongHashMap.class); + + protected VectorMapJoinFastValueStore valueStore; + + @Override + public VectorMapJoinHashMapResult createHashMapResult() { + return new VectorMapJoinFastValueStore.HashMapResult(); + } + + @Override + public void assignSlot(int slot, long key, boolean isNewKey, BytesWritable currentValue) { + + byte[] valueBytes = currentValue.getBytes(); + int valueLength = currentValue.getLength(); + + int pairIndex = 2 * slot; + if (isNewKey) { + // First entry. + slotPairs[pairIndex] = valueStore.addFirst(valueBytes, 0, valueLength); + slotPairs[pairIndex + 1] = key; + } else { + // Add another value. 
+ slotPairs[pairIndex] = valueStore.addMore(slotPairs[pairIndex], valueBytes, 0, valueLength); + } + } + + @Override + public JoinUtil.JoinResult lookup(long key, VectorMapJoinHashMapResult hashMapResult) { + + VectorMapJoinFastValueStore.HashMapResult optimizedHashMapResult = + (VectorMapJoinFastValueStore.HashMapResult) hashMapResult; + + optimizedHashMapResult.forget(); + + long hashCode = VectorMapJoinFastLongHashUtil.hashKey(key); + // LOG.info("VectorMapJoinFastLongHashMap lookup " + key + " hashCode " + hashCode); + long valueRef = findReadSlot(key, hashCode); + JoinUtil.JoinResult joinResult; + if (valueRef == -1) { + joinResult = JoinUtil.JoinResult.NOMATCH; + } else { + optimizedHashMapResult.set(valueStore, valueRef); + + joinResult = JoinUtil.JoinResult.MATCH; + } + + optimizedHashMapResult.setJoinResult(joinResult); + + return joinResult; + } + + public VectorMapJoinFastLongHashMap( + boolean minMaxEnabled, boolean isOuterJoin, HashTableKeyType hashTableKeyType, + int initialCapacity, float loadFactor, int writeBuffersSize, long memUsage) { + super(minMaxEnabled, isOuterJoin, hashTableKeyType, + initialCapacity, loadFactor, writeBuffersSize, memUsage); + valueStore = new VectorMapJoinFastValueStore(writeBuffersSize); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashMultiSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashMultiSet.java new file mode 100644 index 0000000..f9763e3 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashMultiSet.java @@ -0,0 +1,91 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMultiSetResult; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinLongHashMultiSet; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKeyType; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.io.BytesWritable; + +/* + * An single long value multi-set optimized for vector map join. + */ +public class VectorMapJoinFastLongHashMultiSet + extends VectorMapJoinFastLongHashTable + implements VectorMapJoinLongHashMultiSet { + + public static final Log LOG = LogFactory.getLog(VectorMapJoinFastLongHashMultiSet.class); + + @Override + public VectorMapJoinHashMultiSetResult createHashMultiSetResult() { + return new VectorMapJoinFastHashMultiSet.HashMultiSetResult(); + } + + @Override + public void assignSlot(int slot, long key, boolean isNewKey, BytesWritable currentValue) { + + int pairIndex = 2 * slot; + if (isNewKey) { + // First entry. + slotPairs[pairIndex] = 1; // Count. + slotPairs[pairIndex + 1] = key; + } else { + // Add another value. 
+ slotPairs[pairIndex]++; + } + } + + + @Override + public JoinUtil.JoinResult contains(long key, VectorMapJoinHashMultiSetResult hashMultiSetResult) { + + VectorMapJoinFastHashMultiSet.HashMultiSetResult optimizedHashMultiSetResult = + (VectorMapJoinFastHashMultiSet.HashMultiSetResult) hashMultiSetResult; + + optimizedHashMultiSetResult.forget(); + + long hashCode = VectorMapJoinFastLongHashUtil.hashKey(key); + long count = findReadSlot(key, hashCode); + JoinUtil.JoinResult joinResult; + if (count == -1) { + joinResult = JoinUtil.JoinResult.NOMATCH; + } else { + optimizedHashMultiSetResult.set(count); + joinResult = JoinUtil.JoinResult.MATCH; + } + + optimizedHashMultiSetResult.setJoinResult(joinResult); + + return joinResult; + } + + public VectorMapJoinFastLongHashMultiSet( + boolean minMaxEnabled, boolean isOuterJoin, HashTableKeyType hashTableKeyType, + int initialCapacity, float loadFactor, int writeBuffersSize, long memUsage) { + super(minMaxEnabled, isOuterJoin, hashTableKeyType, + initialCapacity, loadFactor, writeBuffersSize, memUsage); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashSet.java new file mode 100644 index 0000000..cd23949 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashSet.java @@ -0,0 +1,84 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.JoinUtil.JoinResult; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashSetResult; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinLongHashSet; +import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKeyType; +import org.apache.hadoop.io.BytesWritable; + +/* + * A single long value set optimized for vector map join. + */ +public class VectorMapJoinFastLongHashSet + extends VectorMapJoinFastLongHashTable + implements VectorMapJoinLongHashSet { + + public static final Log LOG = LogFactory.getLog(VectorMapJoinFastLongHashSet.class); + + @Override + public VectorMapJoinHashSetResult createHashSetResult() { + return new VectorMapJoinFastHashSet.HashSetResult(); + } + + @Override + public void assignSlot(int slot, long key, boolean isNewKey, BytesWritable currentValue) { + + int pairIndex = 2 * slot; + if (isNewKey) { + // First entry. + slotPairs[pairIndex] = 1; // Existence. 
+ slotPairs[pairIndex + 1] = key; + } + } + + @Override + public JoinResult contains(long key, VectorMapJoinHashSetResult hashSetResult) { + + VectorMapJoinFastHashSet.HashSetResult optimizedHashSetResult = + (VectorMapJoinFastHashSet.HashSetResult) hashSetResult; + + optimizedHashSetResult.forget(); + + long hashCode = VectorMapJoinFastLongHashUtil.hashKey(key); + long existance = findReadSlot(key, hashCode); + JoinUtil.JoinResult joinResult; + if (existance == -1) { + joinResult = JoinUtil.JoinResult.NOMATCH; + } else { + joinResult = JoinUtil.JoinResult.MATCH; + } + + optimizedHashSetResult.setJoinResult(joinResult); + + return joinResult; + + } + + public VectorMapJoinFastLongHashSet( + boolean minMaxEnabled, boolean isOuterJoin, HashTableKeyType hashTableKeyType, + int initialCapacity, float loadFactor, int writeBuffersSize, long memUsage) { + super(minMaxEnabled, isOuterJoin, hashTableKeyType, + initialCapacity, loadFactor, writeBuffersSize, memUsage); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java new file mode 100644 index 0000000..b448e1f --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java @@ -0,0 +1,284 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.ql.exec.JoinUtil;
import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMap;
import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinLongHashMap;
import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinLongHashTable;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKeyType;
import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableDeserializeRead;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.io.BytesWritable;
import org.apache.tez.runtime.library.api.KeyValueReader;

import com.google.common.annotations.VisibleForTesting;

/*
 * Base class for hash tables keyed by a single long, optimized for vector map join.
 *
 * Layout: open addressing over the slotPairs array, two longs per logical slot --
 * word 0 is a non-zero "value reference" whose meaning the subclass defines
 * (see assignSlot), word 1 is the key. A zero word 0 marks an empty slot, so
 * entries are never deleted. The bucket count is always a power of two
 * (doubled on expand), and collisions are resolved by probing with increasing
 * triangular steps. Optionally tracks min/max of all inserted keys.
 */
public abstract class VectorMapJoinFastLongHashTable
      extends VectorMapJoinFastHashTable
      implements VectorMapJoinLongHashTable {

  public static final Log LOG = LogFactory.getLog(VectorMapJoinFastLongHashTable.class);

  // How the serialized key is interpreted (BOOLEAN/BYTE/SHORT/INT/LONG).
  private HashTableKeyType hashTableKeyType;

  // Outer join tolerates NULL keys in the small table; inner join treats them as errors.
  private boolean isOuterJoin;

  // Deserializer for BinarySortable-encoded keys (configured for a single long column).
  private BinarySortableDeserializeRead keyBinarySortableDeserializeRead;

  // When enabled, min/max of all inserted keys are maintained for join filtering.
  private boolean useMinMax;
  private long min;
  private long max;

  // Lazily-created scratch writable used only by the @VisibleForTesting putRow overload.
  private BytesWritable testValueBytesWritable;

  @Override
  public boolean useMinMax() {
    return useMinMax;
  }

  @Override
  public long min() {
    return min;
  }

  @Override
  public long max() {
    return max;
  }

  /**
   * Deserialize the BinarySortable key from currentKey and insert the pair.
   * A NULL key is silently skipped for an outer join; for an inner join all
   * NULLs should already have been filtered upstream, so one is an error.
   *
   * @throws HiveException on an unexpected NULL key in an inner join
   */
  @Override
  public void putRow(BytesWritable currentKey, BytesWritable currentValue) throws HiveException, IOException {
    byte[] keyBytes = currentKey.getBytes();
    int keyLength = currentKey.getLength();
    keyBinarySortableDeserializeRead.set(keyBytes, 0, keyLength);
    if (keyBinarySortableDeserializeRead.readCheckNull()) {
      if (isOuterJoin) {
        return;
      } else {
        // For inner join, we expect all NULL values to have been filtered out before now.
        throw new HiveException("Unexpected NULL in map join small table");
      }
    }

    long key = VectorMapJoinFastLongHashUtil.deserializeLongKey(
        keyBinarySortableDeserializeRead, hashTableKeyType);

    add(key, currentValue);
  }


  /** Test-only insert taking an already-deserialized long key and raw value bytes. */
  @VisibleForTesting
  public void putRow(long currentKey, byte[] currentValue) throws HiveException, IOException {
    if (testValueBytesWritable == null) {
      testValueBytesWritable = new BytesWritable();
    }
    testValueBytesWritable.set(currentValue, 0, currentValue.length);
    add(currentKey, testValueBytesWritable);
  }


  /**
   * Subclass hook: store the entry for key into the slot pair at {@code slot}.
   * When isNewKey is false the key already occupies the slot and only the
   * value side (if any) needs updating.
   */
  protected abstract void assignSlot(int slot, long key, boolean isNewKey, BytesWritable currentValue);

  /**
   * Insert (or append to) the entry for key, expanding the table first when
   * the load-factor threshold has been reached. Probes with increasing
   * triangular steps until either an empty slot or the matching key is found.
   */
  public void add(long key, BytesWritable currentValue) {

    if (resizeThreshold <= keysAssigned) {
      expandAndRehash();
    }

    long hashCode = VectorMapJoinFastLongHashUtil.hashKey(key);
    int intHashCode = (int) hashCode;
    int slot = (intHashCode & logicalHashBucketMask);
    long probeSlot = slot;
    int i = 0;
    boolean isNewKey;
    while (true) {
      int pairIndex = 2 * slot;
      long valueRef = slotPairs[pairIndex];
      if (valueRef == 0) {
        // Empty slot -- this key is not in the table yet.
        isNewKey = true;
        break;
      }
      long tableKey = slotPairs[pairIndex + 1];
      if (key == tableKey) {
        isNewKey = false;
        break;
      }
      ++metricPutConflict;
      // Some other key (collision) - keep probing.
      probeSlot += (++i);
      slot = (int)(probeSlot & logicalHashBucketMask);
    }

    // Remember the longest probe sequence ever needed; findReadSlot uses it
    // to terminate unsuccessful lookups early.
    if (largestNumberOfSteps < i) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Probed " + i + " slots (the longest so far) to find space");
      }
      largestNumberOfSteps = i;
    }

    assignSlot(slot, key, isNewKey, currentValue);

    if (isNewKey) {
      keysAssigned++;
      if (useMinMax) {
        if (key < min) {
          min = key;
        }
        if (key > max) {
          max = key;
        }
      }
    }
  }

  /**
   * Double the logical bucket count and re-insert every occupied slot pair
   * into a fresh array, preserving each entry's value-reference word.
   * Probe-conflict metrics are rebuilt from scratch for the new layout.
   */
  private void expandAndRehash() {

    int newLogicalHashBucketCount = logicalHashBucketCount * 2;
    int newLogicalHashBucketMask = newLogicalHashBucketCount - 1;
    int newMetricPutConflict = 0;
    int newLargestNumberOfSteps = 0;

    int newSlotPairArraySize = newLogicalHashBucketCount * 2;
    long[] newSlotPairs = new long[newSlotPairArraySize];

    for (int slot = 0; slot < logicalHashBucketCount; slot++) {
      int pairIndex = slot * 2;
      long valueRef = slotPairs[pairIndex];
      if (valueRef != 0) {
        long tableKey = slotPairs[pairIndex + 1];

        // Copy to new slot table.
        long hashCode = VectorMapJoinFastLongHashUtil.hashKey(tableKey);
        int intHashCode = (int) hashCode;
        int newSlot = intHashCode & newLogicalHashBucketMask;
        long newProbeSlot = newSlot;
        int newPairIndex;
        int i = 0;
        while (true) {
          newPairIndex = newSlot * 2;
          long newValueRef = newSlotPairs[newPairIndex];
          if (newValueRef == 0) {
            break;
          }
          ++newMetricPutConflict;
          // Some other key (collision) - keep probing.
          newProbeSlot += (++i);
          newSlot = (int)(newProbeSlot & newLogicalHashBucketMask);
        }

        if (newLargestNumberOfSteps < i) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Probed " + i + " slots (the longest so far) to find space");
          }
          newLargestNumberOfSteps = i;
        }

        // Use old value reference word.

        newSlotPairs[newPairIndex] = valueRef;
        newSlotPairs[newPairIndex + 1] = tableKey;
      }
    }

    slotPairs = newSlotPairs;
    logicalHashBucketCount = newLogicalHashBucketCount;
    logicalHashBucketMask = newLogicalHashBucketMask;
    metricPutConflict = newMetricPutConflict;
    largestNumberOfSteps = newLargestNumberOfSteps;
    resizeThreshold = (int)(logicalHashBucketCount * loadFactor);
    metricExpands++;
  }

  /**
   * Look up key; returns the slot's non-zero value-reference word, or -1 when
   * the key is absent. Because entries are never deleted, an empty slot means
   * "no match", and probing can stop once it exceeds largestNumberOfSteps --
   * the longest probe sequence any insert ever used.
   */
  protected long findReadSlot(long key, long hashCode) {

    int intHashCode = (int) hashCode;
    int slot = intHashCode & logicalHashBucketMask;

    long probeSlot = slot;
    int i = 0;
    while (true) {
      int pairIndex = 2 * slot;
      long valueRef = slotPairs[pairIndex];
      if (valueRef == 0) {
        // Given that we do not delete, an empty slot means no match.
        return -1;
      }
      long tableKey = slotPairs[pairIndex + 1];
      if (key == tableKey) {
        return slotPairs[pairIndex];
      }
      // Some other key (collision) - keep probing.
      probeSlot += (++i);
      if (i > largestNumberOfSteps) {
        // We know we never went that far when we were inserting.
        return -1;
      }
      slot = (int)(probeSlot & logicalHashBucketMask);
    }
  }

  /*
   * The hash table slots. For a long key hash table, each slot is 2 longs and the array is
   * 2X sized.
   *
   * The slot pair is 1) a non-zero reference word to the first value bytes and 2) the long value.
   */
  protected long[] slotPairs;

  // Allocate the initial slot-pair array for logicalHashBucketCount buckets.
  private void allocateBucketArray() {
    int slotPairArraySize = 2 * logicalHashBucketCount;
    slotPairs = new long[slotPairArraySize];
  }

  /**
   * @param minMaxEnabled when true, track min/max of inserted keys
   * @param isOuterJoin   when true, NULL small-table keys are skipped instead of rejected
   * @param hashTableKeyType the declared (possibly narrower-than-long) key type
   */
  public VectorMapJoinFastLongHashTable(
      boolean minMaxEnabled, boolean isOuterJoin, HashTableKeyType hashTableKeyType,
      int initialCapacity, float loadFactor, int writeBuffersSize, long memUsage) {
    super(initialCapacity, loadFactor, writeBuffersSize, memUsage);
    this.isOuterJoin = isOuterJoin;
    this.hashTableKeyType = hashTableKeyType;
    PrimitiveTypeInfo[] primitiveTypeInfos = { TypeInfoFactory.longTypeInfo };
    keyBinarySortableDeserializeRead = new BinarySortableDeserializeRead(primitiveTypeInfos);
    allocateBucketArray();
    // Start min/max at the extremes so the first inserted key sets both.
    useMinMax = minMaxEnabled;
    min = Long.MAX_VALUE;
    max = Long.MIN_VALUE;
  }
}
package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast;

import java.io.IOException;

import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKeyType;
import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableDeserializeRead;

/*
 * Static helpers shared by the fast long-keyed hash tables: key hashing and
 * key deserialization from the BinarySortable key format.
 */
public class VectorMapJoinFastLongHashUtil {

  /**
   * Mix a long key into a well-distributed 64-bit hash value
   * (a Thomas Wang-style 64-bit integer mix).
   */
  public static long hashKey(long key) {
    key = (~key) + (key << 21); // key = (key << 21) - key - 1;
    key = key ^ (key >>> 24);
    key = (key + (key << 3)) + (key << 8); // key * 265
    key = key ^ (key >>> 14);
    key = (key + (key << 2)) + (key << 4); // key * 21
    key = key ^ (key >>> 28);
    key = key + (key << 31);
    return key;
  }

  /**
   * Read the current key as a long, widening from the table's declared
   * key type (BOOLEAN maps to 0/1).
   *
   * @throws RuntimeException on a non-integer key type
   */
  public static long deserializeLongKey(BinarySortableDeserializeRead keyBinarySortableDeserializeRead,
      HashTableKeyType hashTableKeyType) throws IOException {
    switch (hashTableKeyType) {
    case BOOLEAN:
      return keyBinarySortableDeserializeRead.readBoolean() ? 1 : 0;
    case BYTE:
      return keyBinarySortableDeserializeRead.readByte();
    case SHORT:
      return keyBinarySortableDeserializeRead.readShort();
    case INT:
      return keyBinarySortableDeserializeRead.readInt();
    case LONG:
      return keyBinarySortableDeserializeRead.readLong();
    default:
      throw new RuntimeException("Unexpected hash table key type " + hashTableKeyType.name());
    }
  }
}
+ */ +public class VectorMapJoinFastMultiKeyHashMap + extends VectorMapJoinFastBytesHashMap { + + @VisibleForTesting + public VectorMapJoinFastMultiKeyHashMap(int initialCapacity, float loadFactor, int wbSize) { + this(false, initialCapacity, loadFactor, wbSize, -1); + } + + public VectorMapJoinFastMultiKeyHashMap( + boolean isOuterJoin, + int initialCapacity, float loadFactor, int writeBuffersSize, long memUsage) { + super(initialCapacity, loadFactor, writeBuffersSize, memUsage); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashMultiSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashMultiSet.java new file mode 100644 index 0000000..71a62fe --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashMultiSet.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +/* + * An multi-key value hash multi-set optimized for vector map join. 
+ */ +public class VectorMapJoinFastMultiKeyHashMultiSet + extends VectorMapJoinFastBytesHashMultiSet { + + public VectorMapJoinFastMultiKeyHashMultiSet( + boolean isOuterJoin, + int initialCapacity, float loadFactor, int writeBuffersSize, long memUsage) { + super(initialCapacity, loadFactor, writeBuffersSize, memUsage); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashSet.java new file mode 100644 index 0000000..dad3b32 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastMultiKeyHashSet.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +/* + * An multi-key value hash set optimized for vector map join. 
+ */ +public class VectorMapJoinFastMultiKeyHashSet + extends VectorMapJoinFastBytesHashSet { + + public VectorMapJoinFastMultiKeyHashSet( + boolean isOuterJoin, + int initialCapacity, float loadFactor, int writeBuffersSize, long memUsage) { + super(initialCapacity, loadFactor, writeBuffersSize, memUsage); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringCommon.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringCommon.java new file mode 100644 index 0000000..5c7792f --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringCommon.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast;

import java.io.IOException;

import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableDeserializeRead;
import org.apache.hadoop.hive.serde2.fast.DeserializeRead.ReadStringResults;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.io.BytesWritable;

/*
 * Shared helper for the string-keyed fast hash tables: deserializes a
 * BinarySortable-encoded string key and forwards the entry to a bytes-keyed
 * hash table. (Not itself a hash table.)
 */
public class VectorMapJoinFastStringCommon {

  // Outer join tolerates NULL keys in the small table; inner join treats them as errors.
  private boolean isOuterJoin;

  // Deserializer configured for a single string key column.
  private BinarySortableDeserializeRead keyBinarySortableDeserializeRead;

  // Reusable holder for the deserialized key bytes.
  private ReadStringResults readStringResults;

  /**
   * Deserialize the string key from currentKey and add the (key, value) pair
   * to hashTable. A NULL key is skipped for an outer join; for an inner join
   * NULLs should already have been filtered upstream, so one is an error.
   *
   * @throws HiveException on an unexpected NULL key in an inner join
   */
  public void adaptPutRow(VectorMapJoinFastBytesHashTable hashTable,
      BytesWritable currentKey, BytesWritable currentValue) throws HiveException, IOException {

    keyBinarySortableDeserializeRead.set(
        currentKey.getBytes(), 0, currentKey.getLength());
    if (keyBinarySortableDeserializeRead.readCheckNull()) {
      if (!isOuterJoin) {
        // For inner join, we expect all NULL values to have been filtered out before now.
        throw new HiveException("Unexpected NULL in map join small table");
      }
      return;
    }
    keyBinarySortableDeserializeRead.readString(readStringResults);

    hashTable.add(readStringResults.bytes, readStringResults.start, readStringResults.length,
        currentValue);
  }

  public VectorMapJoinFastStringCommon(boolean isOuterJoin) {
    this.isOuterJoin = isOuterJoin;
    PrimitiveTypeInfo[] singleStringTypeInfo = { TypeInfoFactory.stringTypeInfo };
    keyBinarySortableDeserializeRead = new BinarySortableDeserializeRead(singleStringTypeInfo);
    readStringResults = keyBinarySortableDeserializeRead.createReadStringResults();
  }
}
package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast;

import java.io.IOException;

import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.io.BytesWritable;

/*
 * A hash map keyed by a single string, optimized for vector map join. Key
 * deserialization is delegated to VectorMapJoinFastStringCommon; storage is
 * inherited from the bytes-keyed base class.
 */
public class VectorMapJoinFastStringHashMap extends VectorMapJoinFastBytesHashMap {

  private VectorMapJoinFastStringCommon stringCommon;

  /** Deserializes the string key, then stores the pair via the bytes-keyed base. */
  @Override
  public void putRow(BytesWritable currentKey, BytesWritable currentValue) throws HiveException, IOException {
    stringCommon.adaptPutRow(this, currentKey, currentValue);
  }

  public VectorMapJoinFastStringHashMap(
      boolean isOuterJoin,
      int initialCapacity, float loadFactor, int writeBuffersSize, long memUsage) {
    super(initialCapacity, loadFactor, writeBuffersSize, memUsage);
    stringCommon = new VectorMapJoinFastStringCommon(isOuterJoin);
  }
}
package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast;

import java.io.IOException;

import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.io.BytesWritable;

/*
 * A hash multi-set keyed by a single string, optimized for vector map join.
 * Key deserialization is delegated to VectorMapJoinFastStringCommon; storage
 * is inherited from the bytes-keyed base class.
 */
public class VectorMapJoinFastStringHashMultiSet extends VectorMapJoinFastBytesHashMultiSet {

  private VectorMapJoinFastStringCommon stringCommon;

  /** Deserializes the string key, then stores the entry via the bytes-keyed base. */
  @Override
  public void putRow(BytesWritable currentKey, BytesWritable currentValue) throws HiveException, IOException {
    stringCommon.adaptPutRow(this, currentKey, currentValue);
  }

  public VectorMapJoinFastStringHashMultiSet(
      boolean isOuterJoin,
      int initialCapacity, float loadFactor, int writeBuffersSize, long memUsage) {
    super(initialCapacity, loadFactor, writeBuffersSize, memUsage);
    stringCommon = new VectorMapJoinFastStringCommon(isOuterJoin);
  }
}
package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast;

import java.io.IOException;

import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.io.BytesWritable;

/*
 * A hash set keyed by a single string, optimized for vector map join. Key
 * deserialization is delegated to VectorMapJoinFastStringCommon; storage is
 * inherited from the bytes-keyed base class.
 */
public class VectorMapJoinFastStringHashSet extends VectorMapJoinFastBytesHashSet {

  private VectorMapJoinFastStringCommon stringCommon;

  /** Deserializes the string key, then stores the entry via the bytes-keyed base. */
  @Override
  public void putRow(BytesWritable currentKey, BytesWritable currentValue) throws HiveException, IOException {
    stringCommon.adaptPutRow(this, currentKey, currentValue);
  }

  public VectorMapJoinFastStringHashSet(
      boolean isOuterJoin,
      int initialCapacity, float loadFactor, int writeBuffersSize, long memUsage) {
    super(initialCapacity, loadFactor, writeBuffersSize, memUsage);
    stringCommon = new VectorMapJoinFastStringCommon(isOuterJoin);
  }
}
package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.persistence.HashMapWrapper;
import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey;
import org.apache.hadoop.hive.ql.exec.persistence.MapJoinObjectSerDeContext;
import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTable;
import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinTableContainer;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc;
import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKeyType;
import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKind;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Writable;
import org.apache.tez.runtime.library.api.KeyValueReader;

/**
 * A map join table container backed by one of the "fast" vector map join hash
 * table implementations. The concrete table is chosen at construction time
 * from the vectorized plan's key type (long variants / string / multi-key)
 * and table kind (hash map / multi-set / set). Rows arrive through
 * {@link #putRow(KeyValueReader)}; the key-value-list style putRow of the
 * generic container interface is not supported.
 */
public class VectorMapJoinFastTableContainer implements VectorMapJoinTableContainer {

  // Fixed: previously this logger was created for HashTableLoader (copy-paste).
  private static final Log LOG = LogFactory.getLog(VectorMapJoinFastTableContainer.class.getName());

  private MapJoinDesc desc;
  private Configuration hconf;

  // Sizing knobs read from the configuration.
  private float keyCountAdj;
  private int threshold;
  private float loadFactor;
  private int wbSize;
  private long keyCount;
  private long memUsage;

  // The underlying hash table. (Renamed from the type-shadowing field name
  // "VectorMapJoinFastHashTable".)
  private VectorMapJoinFastHashTable vectorMapJoinFastHashTable;

  /**
   * @param keyCount estimated number of distinct small-table keys, used to
   *     presize the hash table
   * @param memUsage memory budget passed through to the hash table
   */
  public VectorMapJoinFastTableContainer(MapJoinDesc desc, Configuration hconf,
      long keyCount, long memUsage) throws SerDeException {

    this.desc = desc;
    this.hconf = hconf;

    keyCountAdj = HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEKEYCOUNTADJUSTMENT);
    threshold = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLETHRESHOLD);
    loadFactor = HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLELOADFACTOR);
    wbSize = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEWBSIZE);

    this.keyCount = keyCount;
    this.memUsage = memUsage;

    // Scale the configured threshold by the estimated key count.
    int newThreshold = HashMapWrapper.calculateTableSize(
        keyCountAdj, threshold, loadFactor, keyCount);

    vectorMapJoinFastHashTable = createHashTable(newThreshold);
  }

  @Override
  public VectorMapJoinHashTable vectorMapJoinHashTable() {
    return (VectorMapJoinHashTable) vectorMapJoinFastHashTable;
  }

  /**
   * Instantiate the concrete fast hash table matching the vectorized plan's
   * key type and table kind.
   *
   * @throws RuntimeException on an unrecognized key type / kind combination
   *     (previously this silently produced a null table)
   */
  private VectorMapJoinFastHashTable createHashTable(int newThreshold) {

    boolean isOuterJoin = !desc.isNoOuterJoin();
    VectorMapJoinDesc vectorDesc = desc.getVectorDesc();
    HashTableKind hashTableKind = vectorDesc.hashTableKind();
    HashTableKeyType hashTableKeyType = vectorDesc.hashTableKeyType();
    boolean minMaxEnabled = vectorDesc.minMaxEnabled();

    VectorMapJoinFastHashTable hashTable = null;

    switch (hashTableKeyType) {
    case BOOLEAN:
    case BYTE:
    case SHORT:
    case INT:
    case LONG:
      switch (hashTableKind) {
      case HASH_MAP:
        hashTable = new VectorMapJoinFastLongHashMap(
            minMaxEnabled, isOuterJoin, hashTableKeyType,
            newThreshold, loadFactor, wbSize, memUsage);
        break;
      case HASH_MULTISET:
        hashTable = new VectorMapJoinFastLongHashMultiSet(
            minMaxEnabled, isOuterJoin, hashTableKeyType,
            newThreshold, loadFactor, wbSize, memUsage);
        break;
      case HASH_SET:
        hashTable = new VectorMapJoinFastLongHashSet(
            minMaxEnabled, isOuterJoin, hashTableKeyType,
            newThreshold, loadFactor, wbSize, memUsage);
        break;
      }
      break;

    case STRING:
      switch (hashTableKind) {
      case HASH_MAP:
        hashTable = new VectorMapJoinFastStringHashMap(
            isOuterJoin,
            newThreshold, loadFactor, wbSize, memUsage);
        break;
      case HASH_MULTISET:
        hashTable = new VectorMapJoinFastStringHashMultiSet(
            isOuterJoin,
            newThreshold, loadFactor, wbSize, memUsage);
        break;
      case HASH_SET:
        hashTable = new VectorMapJoinFastStringHashSet(
            isOuterJoin,
            newThreshold, loadFactor, wbSize, memUsage);
        break;
      }
      break;

    case MULTI_KEY:
      switch (hashTableKind) {
      case HASH_MAP:
        hashTable = new VectorMapJoinFastMultiKeyHashMap(
            isOuterJoin,
            newThreshold, loadFactor, wbSize, memUsage);
        break;
      case HASH_MULTISET:
        hashTable = new VectorMapJoinFastMultiKeyHashMultiSet(
            isOuterJoin,
            newThreshold, loadFactor, wbSize, memUsage);
        break;
      case HASH_SET:
        hashTable = new VectorMapJoinFastMultiKeyHashSet(
            isOuterJoin,
            newThreshold, loadFactor, wbSize, memUsage);
        break;
      }
      break;
    }

    if (hashTable == null) {
      // Fail loudly instead of returning null and crashing later in putRow.
      throw new RuntimeException("Unexpected hash table key type " + hashTableKeyType
          + " / hash table kind " + hashTableKind);
    }
    return hashTable;
  }

  /** Feed the reader's current (key, value) pair into the hash table. */
  public void putRow(KeyValueReader kvReader) throws HiveException, IOException, SerDeException {
    vectorMapJoinFastHashTable.putRow(
        (BytesWritable) kvReader.getCurrentKey(), (BytesWritable) kvReader.getCurrentValue());
  }

  @Override
  public MapJoinKey putRow(MapJoinObjectSerDeContext keyContext,
      Writable currentKey, MapJoinObjectSerDeContext valueContext,
      Writable currentValue) throws SerDeException, HiveException {
    throw new UnsupportedOperationException("Not applicable");
  }

  @Override
  public void seal() {
    // Nothing to finalize; the fast tables are ready as soon as loading stops.
  }

  @Override
  public ReusableGetAdaptor createGetter(MapJoinKey keyTypeFromLoader) {
    throw new UnsupportedOperationException("Not applicable");
  }

  @Override
  public void clear() {
    throw new UnsupportedOperationException("Not applicable");
  }

  @Override
  public MapJoinKey getAnyKey() {
    throw new UnsupportedOperationException("Not applicable");
  }

  @Override
  public void dumpMetrics() {
    // TODO: surface the hash table's probe/expand metrics.
  }

  @Override
  public boolean hasSpill() {
    // The fast tables are purely in-memory; they never spill.
    return false;
  }
}
* or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMapResult; +import org.apache.hadoop.hive.serde2.WriteBuffers; +import org.apache.hadoop.hive.serde2.WriteBuffers.ByteSegmentRef; +import org.apache.hadoop.hive.serde2.WriteBuffers.Position;; + + +// Supports random access. 
+ +public class VectorMapJoinFastValueStore { + + private static final Log LOG = LogFactory.getLog(VectorMapJoinFastValueStore.class.getName()); + + private WriteBuffers writeBuffers; + + public WriteBuffers writeBuffers() { + return writeBuffers; + } + + public static class HashMapResult extends VectorMapJoinHashMapResult { + + private VectorMapJoinFastValueStore valueStore; + + private boolean hasRows; + private long valueRefWord; + private boolean isSingleRow; + private int cappedCount; + + private boolean haveReadCurrent; + private int readIndex; + private boolean isEof; + + private boolean isNextEof; + private boolean isNextLast; + long nextAbsoluteValueOffset; + boolean isNextValueLengthSmall; + int nextSmallValueLength; + + private ByteSegmentRef byteSegmentRef; + private Position readPos; + + public HashMapResult() { + super(); + valueRefWord = -1; + hasRows = false; + byteSegmentRef = new ByteSegmentRef(); + readPos = new Position(); + } + + public void set(VectorMapJoinFastValueStore valueStore, long valueRefWord) { + // LOG.info("VectorMapJoinFastValueStore set valueRefWord " + Long.toHexString(valueRefWord)); + + this.valueStore = valueStore; + this.valueRefWord = valueRefWord; + + hasRows = true; + isSingleRow = ((valueRefWord & IsLastFlag.flagOnMask) != 0); + cappedCount = + (int) ((valueRefWord & CappedCount.bitMask) >> CappedCount.bitShift); + // Position to beginning. + haveReadCurrent = false; + readIndex = 0; + isEof = false; + } + + @Override + public boolean hasRows() { + return hasRows; + } + + @Override + public boolean isSingleRow() { + if (!hasRows) { + return false; + } + + return isSingleRow; + } + + @Override + public boolean isCappedCountAvailable() { + return true; + } + + @Override + public int cappedCount() { + if (!hasRows) { + return 0; + } + + return cappedCount; + } + + @Override + public ByteSegmentRef first() { + if (!hasRows) { + return null; + } + + // Position to beginning. 
+ haveReadCurrent = false; + readIndex = 0; + isEof = false; + + return internalRead(); + } + + @Override + public ByteSegmentRef next() { + if (!hasRows) { + return null; + } + + return internalRead(); + } + + + public ByteSegmentRef internalRead() { + + long absoluteValueOffset; + + int valueLength; + + if (readIndex == 0) { + /* + * Extract information from reference word from slot table. + */ + absoluteValueOffset = + (valueRefWord & AbsoluteValueOffset.bitMask); + + // Position before the last written value. + valueStore.writeBuffers.setReadPoint(absoluteValueOffset, readPos); + + if (isSingleRow) { + isNextEof = true; + + valueLength = + (int) ((valueRefWord & SmallValueLength.bitMask) >> SmallValueLength.bitShift); + boolean isValueLengthSmall = (valueLength != SmallValueLength.allBitsOn); + if (!isValueLengthSmall) { + // And, if current value is big we must read it. + valueLength = valueStore.writeBuffers.readVInt(readPos); + } + } else { + isNextEof = false; + + // 2nd and beyond records have a relative offset word at the beginning. + long relativeOffsetWord = valueStore.writeBuffers.readVLong(readPos); + + long relativeOffset = + (relativeOffsetWord & NextRelativeValueOffset.bitMask) >> NextRelativeValueOffset.bitShift; + + nextAbsoluteValueOffset = absoluteValueOffset - relativeOffset; + + isNextLast = ((relativeOffsetWord & IsNextValueLastFlag.flagOnMask) != 0); + isNextValueLengthSmall = + ((relativeOffsetWord & IsNextValueLengthSmallFlag.flagOnMask) != 0); + } + + valueLength = + (int) ((valueRefWord & SmallValueLength.bitMask) >> SmallValueLength.bitShift); + boolean isValueLengthSmall = (valueLength != SmallValueLength.allBitsOn); + if (!isValueLengthSmall) { + // And, if current value is big we must read it. + valueLength = valueStore.writeBuffers.readVInt(readPos); + } + + // 2nd and beyond have the next value's small length in the current record. 
+ if (isNextValueLengthSmall) { + nextSmallValueLength = valueStore.writeBuffers.readVInt(readPos); + } else { + nextSmallValueLength = -1; + } + + } else { + if (isNextEof) { + return null; + } + + absoluteValueOffset = nextAbsoluteValueOffset; + + // Position before the last written value. + valueStore.writeBuffers.setReadPoint(absoluteValueOffset, readPos); + + if (isNextLast) { + isNextEof = true; + + if (isNextValueLengthSmall) { + valueLength = nextSmallValueLength; + } else { + valueLength = (int) valueStore.writeBuffers.readVLong(readPos); + } + } else { + isNextEof = false; + + // 2nd and beyond records have a relative offset word at the beginning. + long relativeOffsetWord = valueStore.writeBuffers.readVLong(readPos); + + // Read current value's big length now, if necessary. + if (isNextValueLengthSmall) { + valueLength = nextSmallValueLength; + } else { + valueLength = (int) valueStore.writeBuffers.readVLong(readPos); + } + + long relativeOffset = + (relativeOffsetWord & NextRelativeValueOffset.bitMask) >> NextRelativeValueOffset.bitShift; + + nextAbsoluteValueOffset = absoluteValueOffset - relativeOffset; + + isNextLast = ((relativeOffsetWord & IsNextValueLastFlag.flagOnMask) != 0); + isNextValueLengthSmall = + ((relativeOffsetWord & IsNextValueLengthSmallFlag.flagOnMask) != 0); + if (isNextValueLengthSmall) { + // TODO: Write readVInt + nextSmallValueLength = (int) valueStore.writeBuffers.readVLong(readPos); + } else { + nextSmallValueLength = -1; + } + } + } + + // Our reading is positioned to the value. 
+ valueStore.writeBuffers.getByteSegmentRefToCurrent(byteSegmentRef, valueLength, readPos); + + readIndex++; + return byteSegmentRef; + } + + @Override + public boolean isEof() { + if (!hasRows) { + return true; + } + return isEof; + } + + @Override + public void forget() { + } + + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("(" + super.toString() + ", "); + sb.append("cappedCount " + cappedCount() + ")"); + return sb.toString(); + } + } + + /** + * Bit-length fields within a 64-bit (long) value reference. + * + * Lowest field: An absolute byte offset the value in the WriteBuffers. + * + * 2nd field: For short values, the length of the value. Otherwise, a special constant + * indicating a big value whose length is stored with the value. + * + * 3rd field: A value count, up to a limit (a cap). Have a count helps the join result + * algorithms determine which optimization to use for M x N result cross products. + * A special constant indicates if the value count is >= the cap. + * + * Last field: an bit indicating whether there is only one value. + */ + + // Lowest field. + private final class AbsoluteValueOffset { + private static final int bitLength = 40; + private static final long allBitsOn = (1L << bitLength) - 1; + private static final long bitMask = allBitsOn; + + // Make it a power of 2. + private static final long maxSize = 1L << (bitLength - 2); + } + + private final class SmallValueLength { + private static final int bitLength = 10; + private static final int allBitsOn = (1 << bitLength) - 1; + private static final int threshold = allBitsOn; // Lower this for big value testing. 
+ private static final int bitShift = AbsoluteValueOffset.bitLength; + private static final long bitMask = ((long) allBitsOn) << bitShift; + private static final long allBitsOnBitShifted = ((long) allBitsOn) << bitShift; + } + + private final class CappedCount { + private static final int bitLength = 10; + private static final int allBitsOn = (1 << bitLength) - 1; + private static final int limit = allBitsOn; + private static final int bitShift = SmallValueLength.bitShift + SmallValueLength.bitLength; + private static final long bitMask = ((long) allBitsOn) << bitShift; + } + + private final class IsLastFlag { + private static final int bitShift = CappedCount.bitShift + CappedCount.bitLength;; + private static final long flagOnMask = 1L << bitShift; + } + + // This bit should not be on for valid value references. We use -1 for a no value marker. + private final class IsInvalidFlag { + private static final int bitShift = 63; + private static final long flagOnMask = 1L << bitShift; + } + + /** + * Relative Offset Word stored at the beginning of all but the last value that has a + * relative offset and 2 flags. + * + * We put the flags at the low end of the word so the variable length integer will + * encode smaller. + * + * First bit is a flag indicating if the next value (not the current value) has a small length. + * When the first value is added and it has a small length, that length is stored in the + * value reference and not with the value. So, when we have multiple values, we need a way to + * know to keep the next value's small length with the current value. + * + * Second bit is a flag indicating if the next value (not the current value) is the last value. + * + * The relative offset *backwards* to the next value. 
+ */ + + private final class IsNextValueLengthSmallFlag { + private static final int bitLength = 1; + private static final long flagOnMask = 1L; + } + + private final class IsNextValueLastFlag { + private static final int bitLength = 1; + private static final int bitShift = IsNextValueLengthSmallFlag.bitLength; + private static final long flagOnMask = 1L << bitShift; + } + + private final class NextRelativeValueOffset { + private static final int bitLength = 40; + private static final long allBitsOn = (1L << bitLength) - 1; + private static final int bitShift = IsNextValueLastFlag.bitShift + IsNextValueLastFlag.bitLength; + private static final long bitMask = allBitsOn << bitShift; + } + + public long addFirst(byte[] valueBytes, int valueStart, int valueLength) { + + // First value is written without: next relative offset, next value length, is next value last + // flag, is next value length small flag, etc. + + /* + * We build up the Value Reference Word we will return that will be kept by the caller. + */ + + long valueRefWord = IsLastFlag.flagOnMask; + + valueRefWord |= ((long) 1 << CappedCount.bitShift); + + long newAbsoluteOffset; + if (valueLength < SmallValueLength.threshold) { + + // Small case: Just write the value bytes only. + + if (valueLength == 0) { + // We don't write a first empty value. + // Get an offset to reduce the relative offset later if there are more than 1 value. + newAbsoluteOffset = writeBuffers.getWritePoint(); + } else { + newAbsoluteOffset = writeBuffers.getWritePoint(); + writeBuffers.write(valueBytes, valueStart, valueLength); + } + + // The caller remembers the small value length. + valueRefWord |= ((long) valueLength) << SmallValueLength.bitShift; + } else { + + // Big case: write the length as a VInt and then the value bytes. + + newAbsoluteOffset = writeBuffers.getWritePoint(); + + writeBuffers.writeVInt(valueLength); + writeBuffers.write(valueBytes, valueStart, valueLength); + + // Use magic length value to indicate big. 
+ valueRefWord |= SmallValueLength.allBitsOnBitShifted; + } + + // LOG.info("VectorMapJoinFastValueStore addFirst valueLength " + valueLength + " newAbsoluteOffset " + newAbsoluteOffset + " valueRefWord " + Long.toHexString(valueRefWord)); + + // The lower bits are the absolute value offset. + valueRefWord |= newAbsoluteOffset; + + return valueRefWord; + } + + public long addMore(long oldValueRef, byte[] valueBytes, int valueStart, int valueLength) { + + if ((oldValueRef & IsInvalidFlag.flagOnMask) != 0) { + throw new RuntimeException("Invalid optimized hash table reference"); + } + /* + * Extract information about the old value. + */ + long oldAbsoluteValueOffset = + (oldValueRef & AbsoluteValueOffset.bitMask); + int oldSmallValueLength = + (int) ((oldValueRef & SmallValueLength.bitMask) >> SmallValueLength.bitShift); + boolean isOldValueLengthSmall = (oldSmallValueLength != SmallValueLength.allBitsOn); + int oldCappedCount = + (int) ((oldValueRef & CappedCount.bitMask) >> CappedCount.bitShift); + boolean isOldValueLast = + ((oldValueRef & IsLastFlag.flagOnMask) != 0); + + // LOG.info("VectorMapJoinFastValueStore addMore isOldValueLast " + isOldValueLast + " oldSmallValueLength " + oldSmallValueLength + " oldAbsoluteValueOffset " + oldAbsoluteValueOffset + " oldValueRef " + Long.toHexString(oldValueRef)); + + /* + * Write information about the old value (which becomes our next) at the beginning + * of our new value. 
+ */ + long newAbsoluteOffset = writeBuffers.getWritePoint(); + + long relativeOffsetWord = 0; + if (isOldValueLengthSmall) { + relativeOffsetWord |= IsNextValueLengthSmallFlag.flagOnMask; + } + if (isOldValueLast) { + relativeOffsetWord |= IsNextValueLastFlag.flagOnMask; + } + int newCappedCount = oldCappedCount; + if (newCappedCount < CappedCount.limit) { + newCappedCount++; + } + long relativeOffset = newAbsoluteOffset - oldAbsoluteValueOffset; + relativeOffsetWord |= (relativeOffset << NextRelativeValueOffset.bitShift); + + writeBuffers.writeVLong(relativeOffsetWord); + + // When the next value is small it was not recorded with the old (i.e. next) value and we + // have to remember it. + if (isOldValueLengthSmall) { + writeBuffers.writeVInt(oldSmallValueLength); + } + + // Now, we have written all information about the next value, work on the *new* value. + + long newValueRef = ((long) newCappedCount) << CappedCount.bitShift; + boolean isNewValueSmall = (valueLength < SmallValueLength.threshold); + if (!isNewValueSmall) { + // Use magic value to indicating we are writing the big value length. + newValueRef |= ((long) SmallValueLength.allBitsOn << SmallValueLength.bitShift); + writeBuffers.writeVInt(valueLength); + } else { + // Caller must remember small value length. + newValueRef |= ((long) valueLength) << SmallValueLength.bitShift; + } + writeBuffers.write(valueBytes, valueStart, valueLength); + + // The lower bits are the absolute value offset. 
+ newValueRef |= newAbsoluteOffset; + + // LOG.info("VectorMapJoinFastValueStore addMore valueLength " + valueLength + " newAbsoluteOffset " + newAbsoluteOffset + " newValueRef " + Long.toHexString(newValueRef)); + + return newValueRef; + } + + public VectorMapJoinFastValueStore(int writeBuffersSize) { + writeBuffers = new WriteBuffers(writeBuffersSize, AbsoluteValueOffset.maxSize); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinBytesHashMap.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinBytesHashMap.java new file mode 100644 index 0000000..512db1b --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinBytesHashMap.java @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.exec.JoinUtil; + +/* + * The interface for a single byte array key hash map lookup method. 
 */
public interface VectorMapJoinBytesHashMap
    extends VectorMapJoinBytesHashTable, VectorMapJoinHashMap {

  /*
   * Lookup a byte array key in the hash map.
   *
   * @param keyBytes
   *          A byte array containing the key within a range.
   * @param keyStart
   *          The offset of the beginning of the key.
   * @param keyLength
   *          The length of the key.
   * @param hashMapResult
   *          The object to receive small table value(s) information on a MATCH.
   *          Or, for SPILL, it has information on where to spill the big table row.
   *
   * @return
   *          Whether the lookup was a match, no match, or spill (the partition with the key
   *          is currently spilled).
   */
  JoinUtil.JoinResult lookup(byte[] keyBytes, int keyStart, int keyLength,
      VectorMapJoinHashMapResult hashMapResult) throws IOException;

}
\ No newline at end of file
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinBytesHashMultiSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinBytesHashMultiSet.java
new file mode 100644
index 0000000..196403d
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinBytesHashMultiSet.java
@@ -0,0 +1,51 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable;

import java.io.IOException;

import org.apache.hadoop.hive.ql.exec.JoinUtil;

/*
 * The interface for a single byte array key hash multi-set contains method.
 */
public interface VectorMapJoinBytesHashMultiSet
    extends VectorMapJoinBytesHashTable, VectorMapJoinHashMultiSet {

  /*
   * Lookup a byte array key in the hash multi-set.
   *
   * @param keyBytes
   *          A byte array containing the key within a range.
   * @param keyStart
   *          The offset of the beginning of the key.
   * @param keyLength
   *          The length of the key.
   * @param hashMultiSetResult
   *          The object to receive small table value(s) information on a MATCH.
   *          Or, for SPILL, it has information on where to spill the big table row.
   *
   * @return
   *          Whether the lookup was a match, no match, or spilled (the partition with the key
   *          is currently spilled).
   */
  JoinUtil.JoinResult contains(byte[] keyBytes, int keyStart, int keyLength,
      VectorMapJoinHashMultiSetResult hashMultiSetResult) throws IOException;

}
\ No newline at end of file
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinBytesHashSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinBytesHashSet.java
new file mode 100644
index 0000000..a0c93e5
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinBytesHashSet.java
@@ -0,0 +1,51 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable;

import java.io.IOException;

import org.apache.hadoop.hive.ql.exec.JoinUtil;

/*
 * The interface for a single byte array key hash set contains method.
 * (Fixed copy-paste: the original comment said "hash multi-set".)
 */
public interface VectorMapJoinBytesHashSet
    extends VectorMapJoinBytesHashTable, VectorMapJoinHashSet {

  /*
   * Lookup a byte array key in the hash set.
   *
   * @param keyBytes
   *          A byte array containing the key within a range.
   * @param keyStart
   *          The offset of the beginning of the key.
   * @param keyLength
   *          The length of the key.
   * @param hashSetResult
   *          The object to receive small table value(s) information on a MATCH.
   *          Or, for SPILL, it has information on where to spill the big table row.
   *
   * @return
   *          Whether the lookup was a match, no match, or spilled (the partition with the key
   *          is currently spilled).
   */
  JoinUtil.JoinResult contains(byte[] keyBytes, int keyStart, int keyLength,
      VectorMapJoinHashSetResult hashSetResult) throws IOException;

}
\ No newline at end of file
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinBytesHashTable.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinBytesHashTable.java
new file mode 100644
index 0000000..7494e1d
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinBytesHashTable.java
@@ -0,0 +1,26 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.
See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable;

/*
 * Interface for a vector map join hash table (which could be a hash map, hash multi-set, or
 * hash set) for a single byte array key.  A marker interface joined with one of the
 * kind-specific interfaces by the concrete byte-key implementations.
 */
public interface VectorMapJoinBytesHashTable extends VectorMapJoinHashTable {
}
\ No newline at end of file
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashMap.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashMap.java
new file mode 100644
index 0000000..7abe2c8
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashMap.java
@@ -0,0 +1,34 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable;

/*
 * The root interface for a vector map join hash map.
 */
public interface VectorMapJoinHashMap extends VectorMapJoinHashTable {

  /*
   * @return A new hash map result implementation specific object.
   *
   * The object can be used to access the values when there is a match, or
   * access spill information when the partition with the key is currently spilled.
   */
  VectorMapJoinHashMapResult createHashMapResult();

}
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashMapResult.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashMapResult.java
new file mode 100644
index 0000000..fa6dedb
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashMapResult.java
@@ -0,0 +1,63 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.
You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable;

import org.apache.hadoop.hive.serde2.WriteBuffers.ByteSegmentRef;

/*
 * Abstract class for a hash map result.  For reading the values, one-by-one.
 */
public abstract class VectorMapJoinHashMapResult extends VectorMapJoinHashTableResult {

  /**
   * @return Whether there are any rows (i.e. true for match).
   */
  public abstract boolean hasRows();

  /**
   * @return Whether there is 1 value row.
   */
  public abstract boolean isSingleRow();

  /**
   * @return Whether there is a capped count available from cappedCount.
   */
  public abstract boolean isCappedCountAvailable();

  /**
   * @return The count of values, up to an arbitrary cap limit.  When available, the capped
   *         count can be used to make decisions on how to optimally generate join results.
   */
  public abstract int cappedCount();

  /**
   * @return A reference to the first value, or null if there are no values.
   */
  public abstract ByteSegmentRef first();

  /**
   * @return The next value, or null if there are no more values to be read.
   */
  public abstract ByteSegmentRef next();

  /**
   * @return Whether reading is at the end.
   */
  public abstract boolean isEof();
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashMultiSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashMultiSet.java
new file mode 100644
index 0000000..210597d
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashMultiSet.java
@@ -0,0 +1,31 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable;

public interface VectorMapJoinHashMultiSet extends VectorMapJoinHashTable {

  /*
   * @return A new hash multi-set result implementation specific object.
   *
   * The object can be used to access the *count* of values when the key is contained in the
   * multi-set, or access spill information when the partition with the key is currently spilled.
   */
  VectorMapJoinHashMultiSetResult createHashMultiSetResult();

}
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashMultiSetResult.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashMultiSetResult.java
new file mode 100644
index 0000000..0728f78
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashMultiSetResult.java
@@ -0,0 +1,34 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable;

/*
 * Abstract class for a hash multi-set result.
 */
public abstract class VectorMapJoinHashMultiSetResult extends VectorMapJoinHashTableResult {

  // Number of occurrences of the lookup key; set by the implementation on a contains() match.
  protected long count;

  /*
   * @return The multi-set count for the lookup key.
   */
  public long count() {
    return count;
  }
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashSet.java
new file mode 100644
index 0000000..a26f997
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashSet.java
@@ -0,0 +1,34 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable;

/*
 * The root interface for a vector map join hash set.
 */
public interface VectorMapJoinHashSet extends VectorMapJoinHashTable {

  /*
   * @return A new hash set result implementation specific object.
   *
   * The object can be used to access spill information when the partition with the key
   * is currently spilled.
+ */ + VectorMapJoinHashSetResult createHashSetResult(); + +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashSetResult.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashSetResult.java new file mode 100644 index 0000000..467c4c1 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashSetResult.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable; + +/* + * Abstract class for a hash set result. + */ +public abstract class VectorMapJoinHashSetResult extends VectorMapJoinHashTableResult { + + // Nothing currently available for hash sets. 
+ +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashTable.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashTable.java new file mode 100644 index 0000000..7e219ec --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashTable.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.io.BytesWritable; + +/* + * Root interface for a vector map join hash table (which could be a hash map, hash multi-set, or + * hash set). + */ +public interface VectorMapJoinHashTable { + + + /* + * @param currentKey + * The current key. + * @param currentValue + * The current value. 
+ */ + void putRow(BytesWritable currentKey, BytesWritable currentValue) + throws SerDeException, HiveException, IOException; + +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashTableResult.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashTableResult.java new file mode 100644 index 0000000..ce598e3 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinHashTableResult.java @@ -0,0 +1,81 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable; + +import org.apache.hadoop.hive.ql.exec.JoinUtil; + +/* + * Root abstract class for a hash table result. + */ +public abstract class VectorMapJoinHashTableResult { + + private JoinUtil.JoinResult joinResult; + + private int spillPartitionId; + + public VectorMapJoinHashTableResult() { + joinResult = JoinUtil.JoinResult.NOMATCH; + spillPartitionId = -1; + } + + /** + * @return The join result from the most recent hash map match, or hash multi-set / set contains + * call. 
+ */ + public JoinUtil.JoinResult joinResult() { + return joinResult; + } + + /** + * Set the current join result. + * @param joinResult + * The new join result. + */ + public void setJoinResult(JoinUtil.JoinResult joinResult) { + this.joinResult = joinResult; + } + + /** + * Forget about the most recent hash table lookup or contains call. + */ + public void forget() { + joinResult = JoinUtil.JoinResult.NOMATCH; + } + + /** + * Set the spill partition id. + */ + public void setSpillPartitionId(int spillPartitionId) { + this.spillPartitionId = spillPartitionId; + } + + /** + * @return The Hybrid Grace spill partition id. + */ + public int spillPartitionId() { + return spillPartitionId; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("joinResult " + joinResult.name()); + return sb.toString(); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinLongHashMap.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinLongHashMap.java new file mode 100644 index 0000000..f180d02 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinLongHashMap.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.exec.JoinUtil; + +/* + * The interface for a single long key hash map lookup method. + */ +public interface VectorMapJoinLongHashMap + extends VectorMapJoinLongHashTable, VectorMapJoinHashMap { + + /* + * Lookup an long in the hash map. + * + * @param key + * The long key. + * @param hashMapResult + * The object to receive small table value(s) information on a MATCH. + * Or, for SPILL, it has information on where to spill the big table row. + * + * @return + * Whether the lookup was a match, no match, or spilled (the partition with the key + * is currently spilled). + */ + JoinUtil.JoinResult lookup(long key, VectorMapJoinHashMapResult hashMapResult) throws IOException; + +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinLongHashMultiSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinLongHashMultiSet.java new file mode 100644 index 0000000..7477584 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinLongHashMultiSet.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.exec.JoinUtil; + +/* + * The interface for a single long key hash multi-set contains method. + */ +public interface VectorMapJoinLongHashMultiSet + extends VectorMapJoinLongHashTable, VectorMapJoinHashMultiSet { + + /* + * Lookup an long in the hash multi-set. + * + * @param key + * The long key. + * @param hashMultiSetResult + * The object to receive small table value(s) information on a MATCH. + * Or, for SPILL, it has information on where to spill the big table row. + * + * @return + * Whether the lookup was a match, no match, or spilled (the partition with the key + * is currently spilled). + */ + JoinUtil.JoinResult contains(long key, VectorMapJoinHashMultiSetResult hashMultiSetResult) throws IOException; + +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinLongHashSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinLongHashSet.java new file mode 100644 index 0000000..8c28bff --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinLongHashSet.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.exec.JoinUtil; + +/* + * The interface adds the single long key hash multi-set contains method. + */ +public interface VectorMapJoinLongHashSet + extends VectorMapJoinLongHashTable, VectorMapJoinHashSet { + + /* + * Lookup an long in the hash set. + * + * @param key + * The long key. + * @param hashSetResult + * The object to receive small table value(s) information on a MATCH. + * Or, for SPILL, it has information on where to spill the big table row. + * + * @return + * Whether the lookup was a match, no match, or spilled (the partition with the key + * is currently spilled). + */ + JoinUtil.JoinResult contains(long key, VectorMapJoinHashSetResult hashSetResult) throws IOException; + +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinLongHashTable.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinLongHashTable.java new file mode 100644 index 0000000..046a403 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinLongHashTable.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable; + +/* + * Interface for a vector map join hash table (which could be a hash map, hash multi-set, or + * hash set) for a single long. + */ +public interface VectorMapJoinLongHashTable extends VectorMapJoinHashTable { + + boolean useMinMax(); + long min(); + long max(); + +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinTableContainer.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinTableContainer.java new file mode 100644 index 0000000..09631e4 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/hashtable/VectorMapJoinTableContainer.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable; + +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; + +public interface VectorMapJoinTableContainer extends MapJoinTableContainer { + + VectorMapJoinHashTable vectorMapJoinHashTable(); + + // com.esotericsoftware.kryo.io.Output getHybridBigTableSpillOutput(int partitionId); +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedCreateHashTable.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedCreateHashTable.java new file mode 100644 index 0000000..5442834 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedCreateHashTable.java @@ -0,0 +1,129 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor; +import org.apache.hadoop.hive.ql.plan.MapJoinDesc; +import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc; +import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKeyType; +import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKind; + +/** + */ +public class VectorMapJoinOptimizedCreateHashTable { + + private static final Log LOG = LogFactory.getLog(VectorMapJoinOptimizedCreateHashTable.class.getName()); + + public static VectorMapJoinOptimizedHashTable createHashTable(MapJoinDesc desc, + MapJoinTableContainer mapJoinTableContainer) { + + MapJoinKey refKey = mapJoinTableContainer.getAnyKey(); + ReusableGetAdaptor hashMapRowGetter = mapJoinTableContainer.createGetter(refKey); + + boolean isOuterJoin = !desc.isNoOuterJoin(); + VectorMapJoinDesc vectorDesc = desc.getVectorDesc(); + HashTableKind hashTableKind = vectorDesc.hashTableKind(); + HashTableKeyType hashTableKeyType = vectorDesc.hashTableKeyType(); + boolean minMaxEnabled = vectorDesc.minMaxEnabled(); + + VectorMapJoinOptimizedHashTable hashTable = null; + + switch (hashTableKeyType) { + case BOOLEAN: + case BYTE: + case SHORT: + case INT: + case LONG: + switch (hashTableKind) { + case HASH_MAP: + hashTable = new VectorMapJoinOptimizedLongHashMap( + minMaxEnabled, isOuterJoin, hashTableKeyType, + mapJoinTableContainer, hashMapRowGetter); + break; + case HASH_MULTISET: + hashTable = new VectorMapJoinOptimizedLongHashMultiSet( + minMaxEnabled, isOuterJoin, hashTableKeyType, + mapJoinTableContainer, 
hashMapRowGetter); + break; + case HASH_SET: + hashTable = new VectorMapJoinOptimizedLongHashSet( + minMaxEnabled, isOuterJoin, hashTableKeyType, + mapJoinTableContainer, hashMapRowGetter); + break; + } + break; + + case STRING: + switch (hashTableKind) { + case HASH_MAP: + hashTable = new VectorMapJoinOptimizedStringHashMap( + isOuterJoin, + mapJoinTableContainer, hashMapRowGetter); + break; + case HASH_MULTISET: + hashTable = new VectorMapJoinOptimizedStringHashMultiSet( + isOuterJoin, + mapJoinTableContainer, hashMapRowGetter); + break; + case HASH_SET: + hashTable = new VectorMapJoinOptimizedStringHashSet( + isOuterJoin, + mapJoinTableContainer, hashMapRowGetter); + break; + } + break; + + case MULTI_KEY: + switch (hashTableKind) { + case HASH_MAP: + hashTable = new VectorMapJoinOptimizedMultiKeyHashMap( + isOuterJoin, + mapJoinTableContainer, hashMapRowGetter); + break; + case HASH_MULTISET: + hashTable = new VectorMapJoinOptimizedMultiKeyHashMultiSet( + isOuterJoin, + mapJoinTableContainer, hashMapRowGetter); + break; + case HASH_SET: + hashTable = new VectorMapJoinOptimizedMultiKeyHashSet( + isOuterJoin, + mapJoinTableContainer, hashMapRowGetter); + break; + } + break; + } + return hashTable; + } + + /* + @Override + public com.esotericsoftware.kryo.io.Output getHybridBigTableSpillOutput(int partitionId) { + + HybridHashTableContainer ht = (HybridHashTableContainer) mapJoinTableContainer; + + HashPartition hp = ht.getHashPartitions()[partitionId]; + + return hp.getMatchfileOutput(); + } + */ +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashMap.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashMap.java new file mode 100644 index 0000000..e56c821 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashMap.java @@ -0,0 +1,128 @@ +/** + * Licensed to the Apache Software 
Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.persistence.BytesBytesMultiHashMap; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashMap; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMapResult; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTableResult; +import org.apache.hadoop.hive.serde2.WriteBuffers.ByteSegmentRef; + +public class VectorMapJoinOptimizedHashMap + extends VectorMapJoinOptimizedHashTable + implements VectorMapJoinBytesHashMap { + + @Override + public VectorMapJoinHashMapResult createHashMapResult() { + return new HashMapResult(); + } + + public static class HashMapResult extends VectorMapJoinHashMapResult { + + private BytesBytesMultiHashMap.Result bytesBytesMultiHashMapResult; + + public HashMapResult() { + super(); + bytesBytesMultiHashMapResult = new 
BytesBytesMultiHashMap.Result(); + } + + public BytesBytesMultiHashMap.Result bytesBytesMultiHashMapResult() { + return bytesBytesMultiHashMapResult; + } + + @Override + public boolean hasRows() { + return (joinResult() == JoinUtil.JoinResult.MATCH); + } + + @Override + public boolean isSingleRow() { + if (joinResult() != JoinUtil.JoinResult.MATCH) { + throw new RuntimeException("HashMapResult is not a match"); + } + return bytesBytesMultiHashMapResult.isSingleRow(); + } + + @Override + public boolean isCappedCountAvailable() { + return false; + } + + @Override + public int cappedCount() { + return 0; + } + + @Override + public ByteSegmentRef first() { + if (joinResult() != JoinUtil.JoinResult.MATCH) { + throw new RuntimeException("HashMapResult is not a match"); + } + return bytesBytesMultiHashMapResult.first(); + } + + @Override + public ByteSegmentRef next() { + return bytesBytesMultiHashMapResult.next(); + } + + @Override + public boolean isEof() { + return bytesBytesMultiHashMapResult.isEof(); + } + + @Override + public void forget() { + bytesBytesMultiHashMapResult.forget(); + super.forget(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("(" + super.toString() + ", "); + sb.append("isSingleRow " + (joinResult() == JoinUtil.JoinResult.MATCH ? 
isSingleRow() : "") + ")"); + return sb.toString(); + } + } + + @Override + public JoinUtil.JoinResult lookup(byte[] keyBytes, int keyOffset, int keyLength, + VectorMapJoinHashMapResult hashMapResult) throws IOException { + + HashMapResult implementationHashMapResult = (HashMapResult) hashMapResult; + + JoinUtil.JoinResult joinResult = + doLookup(keyBytes, keyOffset, keyLength, + implementationHashMapResult.bytesBytesMultiHashMapResult(), + (VectorMapJoinHashTableResult) hashMapResult); + + return joinResult; + } + + public VectorMapJoinOptimizedHashMap( + MapJoinTableContainer originalTableContainer, ReusableGetAdaptor hashMapRowGetter) { + super(originalTableContainer, hashMapRowGetter); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashMultiSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashMultiSet.java new file mode 100644 index 0000000..34de7e1 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashMultiSet.java @@ -0,0 +1,103 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.persistence.BytesBytesMultiHashMap; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashMultiSet; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMultiSetResult; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTableResult; +import org.apache.hadoop.hive.serde2.WriteBuffers.ByteSegmentRef; + +public class VectorMapJoinOptimizedHashMultiSet + extends VectorMapJoinOptimizedHashTable + implements VectorMapJoinBytesHashMultiSet { + + @Override + public VectorMapJoinHashMultiSetResult createHashMultiSetResult() { + return new HashMultiSetResult(); + } + + public static class HashMultiSetResult extends VectorMapJoinHashMultiSetResult { + + private BytesBytesMultiHashMap.Result bytesBytesMultiHashMapResult; + + private boolean haveCount; + + public HashMultiSetResult() { + super(); + bytesBytesMultiHashMapResult = new BytesBytesMultiHashMap.Result(); + } + + public BytesBytesMultiHashMap.Result bytesBytesMultiHashMapResult() { + return bytesBytesMultiHashMapResult; + } + + /* + * @return The multi-set count for the lookup key. 
+ */ + @Override + public long count() { + if (!haveCount) { + if (bytesBytesMultiHashMapResult.isSingleRow()) { + count = 1; + } else { + count = 0; + ByteSegmentRef byteSegmentRef = bytesBytesMultiHashMapResult.first(); + while (byteSegmentRef != null) { + count++; + byteSegmentRef = bytesBytesMultiHashMapResult.next(); + } + } + haveCount = true; + } + return count; + } + + @Override + public void forget() { + haveCount = false; + bytesBytesMultiHashMapResult.forget(); + super.forget(); + } + } + + @Override + public JoinUtil.JoinResult contains(byte[] keyBytes, int keyOffset, int keyLength, + VectorMapJoinHashMultiSetResult hashMultiSetResult) throws IOException { + + HashMultiSetResult implementationHashMultiSetResult = (HashMultiSetResult) hashMultiSetResult; + + JoinUtil.JoinResult joinResult = + doLookup(keyBytes, keyOffset, keyLength, + implementationHashMultiSetResult.bytesBytesMultiHashMapResult(), + (VectorMapJoinHashTableResult) hashMultiSetResult); + + return joinResult; + } + + public VectorMapJoinOptimizedHashMultiSet( + MapJoinTableContainer originalTableContainer, ReusableGetAdaptor hashMapRowGetter) { + super(originalTableContainer, hashMapRowGetter); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashSet.java new file mode 100644 index 0000000..93a89d7 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashSet.java @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.persistence.BytesBytesMultiHashMap; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashSet; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashSetResult; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTableResult; + +public class VectorMapJoinOptimizedHashSet + extends VectorMapJoinOptimizedHashTable + implements VectorMapJoinBytesHashSet { + + @Override + public VectorMapJoinHashSetResult createHashSetResult() { + return new HashSetResult(); + } + + public static class HashSetResult extends VectorMapJoinHashSetResult { + + private BytesBytesMultiHashMap.Result bytesBytesMultiHashMapResult; + + public HashSetResult() { + super(); + bytesBytesMultiHashMapResult = new BytesBytesMultiHashMap.Result(); + } + + public BytesBytesMultiHashMap.Result bytesBytesMultiHashMapResult() { + return bytesBytesMultiHashMapResult; + } + + @Override + public void forget() { + bytesBytesMultiHashMapResult.forget(); + super.forget(); + } + } + + @Override + 
public JoinUtil.JoinResult contains(byte[] keyBytes, int keyOffset, int keyLength, + VectorMapJoinHashSetResult hashSetResult) throws IOException { + + HashSetResult implementationHashSetResult = (HashSetResult) hashSetResult; + + JoinUtil.JoinResult joinResult = + doLookup(keyBytes, keyOffset, keyLength, + implementationHashSetResult.bytesBytesMultiHashMapResult(), + (VectorMapJoinHashTableResult) hashSetResult); + + return joinResult; + } + + public VectorMapJoinOptimizedHashSet( + MapJoinTableContainer originalTableContainer, ReusableGetAdaptor hashMapRowGetter) { + super(originalTableContainer, hashMapRowGetter); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashTable.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashTable.java new file mode 100644 index 0000000..a2d4e4c --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedHashTable.java @@ -0,0 +1,95 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.persistence.BytesBytesMultiHashMap; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainerDirectAccess; +import org.apache.hadoop.hive.ql.exec.persistence.ReusableGetAdaptorDirectAccess; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTable; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashTableResult; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.Writable; + +/* + * Root interface for a vector map join hash table (which could be a hash map, hash multi-set, or + * hash set). 
+ */ +public abstract class VectorMapJoinOptimizedHashTable implements VectorMapJoinHashTable { + + private static final Log LOG = LogFactory.getLog(VectorMapJoinOptimizedHashTable.class.getName()); + + protected final MapJoinTableContainer originalTableContainer; + protected final MapJoinTableContainerDirectAccess containerDirectAccess; + protected final ReusableGetAdaptorDirectAccess adapatorDirectAccess; + + public static class SerializedBytes { + byte[] bytes; + int offset; + int length; + } + + @Override + public void putRow(BytesWritable currentKey, BytesWritable currentValue) + throws SerDeException, HiveException, IOException { + + putRowInternal(currentKey, currentValue); + } + + protected void putRowInternal(BytesWritable key, BytesWritable value) + throws SerDeException, HiveException, IOException { + + containerDirectAccess.put((Writable) key, (Writable) value); + } + + public JoinUtil.JoinResult doLookup(byte[] keyBytes, int keyOffset, int keyLength, + BytesBytesMultiHashMap.Result bytesBytesMultiHashMapResult, + VectorMapJoinHashTableResult hashTableResult) { + + hashTableResult.forget(); + + JoinUtil.JoinResult joinResult = + adapatorDirectAccess.setDirect(keyBytes, keyOffset, keyLength, + bytesBytesMultiHashMapResult); + if (joinResult == JoinUtil.JoinResult.SPILL) { + hashTableResult.setSpillPartitionId(adapatorDirectAccess.directSpillPartitionId()); + } + + hashTableResult.setJoinResult(joinResult); + + return joinResult; + } + + public VectorMapJoinOptimizedHashTable( + MapJoinTableContainer originalTableContainer, ReusableGetAdaptor hashMapRowGetter) { + + this.originalTableContainer = originalTableContainer; + containerDirectAccess = (MapJoinTableContainerDirectAccess) originalTableContainer; + adapatorDirectAccess = (ReusableGetAdaptorDirectAccess) hashMapRowGetter; + } + +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedLongCommon.java 
ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedLongCommon.java new file mode 100644 index 0000000..60825ce --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedLongCommon.java @@ -0,0 +1,171 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized.VectorMapJoinOptimizedHashTable.SerializedBytes; +import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKeyType; +import org.apache.hadoop.hive.serde2.ByteStream.Output; +import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableSerializeWrite; + +/* + * An single long value hash map based on the BytesBytesMultiHashMap. + * + * We serialize the long key into BinarySortable format into an output buffer accepted by + * BytesBytesMultiHashMap. 
+ */ +public class VectorMapJoinOptimizedLongCommon { + + private static final Log LOG = LogFactory.getLog(VectorMapJoinOptimizedLongCommon.class.getName()); + + private boolean isOuterJoin; + + private HashTableKeyType hashTableKeyType; + + // private BinarySortableDeserializeRead keyBinarySortableDeserializeRead; + + private BinarySortableSerializeWrite keyBinarySortableSerializeWrite; + + private transient Output output; + + private transient SerializedBytes serializedBytes; + + // protected boolean useMinMax; + protected long min; + protected long max; + + public boolean useMinMax() { + return false; + } + + public long min() { + return min; + } + + public long max() { + return max; + } + + /* + * For now, just use MapJoinBytesTableContainer / HybridHashTableContainer directly. + + public void adaptPutRow(VectorMapJoinOptimizedHashTable hashTable, + BytesWritable currentKey, BytesWritable currentValue) + throws SerDeException, HiveException, IOException { + + if (useMinMax) { + // Peek at the BinarySortable key to extract the long so we can determine min and max. + byte[] keyBytes = currentKey.getBytes(); + int keyLength = currentKey.getLength(); + keyBinarySortableDeserializeRead.set(keyBytes, 0, keyLength); + if (keyBinarySortableDeserializeRead.readCheckNull()) { + if (isOuterJoin) { + return; + } else { + // For inner join, we expect all NULL values to have been filtered out before now. + throw new HiveException("Unexpected NULL"); + } + } + long key = 0; + switch (hashTableKeyType) { + case BOOLEAN: + key = (keyBinarySortableDeserializeRead.readBoolean() ? 
1 : 0); + break; + case BYTE: + key = (long) keyBinarySortableDeserializeRead.readByte(); + break; + case SHORT: + key = (long) keyBinarySortableDeserializeRead.readShort(); + break; + case INT: + key = (long) keyBinarySortableDeserializeRead.readInt(); + break; + case LONG: + key = keyBinarySortableDeserializeRead.readLong(); + break; + default: + throw new RuntimeException("Unexpected hash table key type " + hashTableKeyType.name()); + } + if (key < min) { + min = key; + } + if (key > max) { + max = key; + } + + // byte[] bytes = Arrays.copyOf(currentKey.get(), currentKey.getLength()); + // LOG.info("VectorMapJoinOptimizedLongCommon adaptPutRow key " + key + " min " + min + " max " + max + " hashTableKeyType " + hashTableKeyType.name() + " hex " + Hex.encodeHexString(bytes)); + + } + + hashTable.putRowInternal(currentKey, currentValue); + } + */ + + public SerializedBytes serialize(long key) throws IOException { + keyBinarySortableSerializeWrite.reset(); + + switch (hashTableKeyType) { + case BOOLEAN: + keyBinarySortableSerializeWrite.writeBoolean(key == 1); + break; + case BYTE: + keyBinarySortableSerializeWrite.writeByte((byte) key); + break; + case SHORT: + keyBinarySortableSerializeWrite.writeShort((short) key); + break; + case INT: + keyBinarySortableSerializeWrite.writeInt((int) key); + break; + case LONG: + keyBinarySortableSerializeWrite.writeLong(key); + break; + default: + throw new RuntimeException("Unexpected hash table key type " + hashTableKeyType.name()); + } + + // byte[] bytes = Arrays.copyOf(output.getData(), output.getLength()); + // LOG.info("VectorMapJoinOptimizedLongCommon serialize key " + key + " hashTableKeyType " + hashTableKeyType.name() + " hex " + Hex.encodeHexString(bytes)); + + serializedBytes.bytes = output.getData(); + serializedBytes.offset = 0; + serializedBytes.length = output.getLength(); + + return serializedBytes; + } + + public VectorMapJoinOptimizedLongCommon( + boolean minMaxEnabled, boolean isOuterJoin, HashTableKeyType 
hashTableKeyType) { + this.isOuterJoin = isOuterJoin; + // useMinMax = minMaxEnabled; + min = Long.MAX_VALUE; + max = Long.MIN_VALUE; + this.hashTableKeyType = hashTableKeyType; + // PrimitiveTypeInfo[] primitiveTypeInfos = { TypeInfoFactory.longTypeInfo }; + // keyBinarySortableDeserializeRead = new BinarySortableDeserializeRead(primitiveTypeInfos); + keyBinarySortableSerializeWrite = new BinarySortableSerializeWrite(1); + output = new Output(); + keyBinarySortableSerializeWrite.set(output); + serializedBytes = new SerializedBytes(); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedLongHashMap.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedLongHashMap.java new file mode 100644 index 0000000..403d265 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedLongHashMap.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.exec.JoinUtil.JoinResult; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMapResult; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinLongHashMap; +import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKeyType; + +/* + * An single long value hash map based on the BytesBytesMultiHashMap. + * + * We serialize the long key into BinarySortable format into an output buffer accepted by + * BytesBytesMultiHashMap. + */ +public class VectorMapJoinOptimizedLongHashMap + extends VectorMapJoinOptimizedHashMap + implements VectorMapJoinLongHashMap { + + private VectorMapJoinOptimizedLongCommon longCommon; + + @Override + public boolean useMinMax() { + return longCommon.useMinMax(); + } + + @Override + public long min() { + return longCommon.min(); + } + + @Override + public long max() { + return longCommon.max(); + } + + /* + @Override + public void putRow(BytesWritable currentKey, BytesWritable currentValue) + throws SerDeException, HiveException, IOException { + + longCommon.adaptPutRow((VectorMapJoinOptimizedHashTable) this, currentKey, currentValue); + } + */ + + @Override + public JoinResult lookup(long key, + VectorMapJoinHashMapResult hashMapResult) throws IOException { + + SerializedBytes serializedBytes = longCommon.serialize(key); + + return super.lookup(serializedBytes.bytes, serializedBytes.offset, serializedBytes.length, + hashMapResult); + } + + public VectorMapJoinOptimizedLongHashMap( + boolean minMaxEnabled, boolean isOuterJoin, HashTableKeyType hashTableKeyType, + MapJoinTableContainer originalTableContainer, ReusableGetAdaptor hashMapRowGetter) { + super(originalTableContainer, 
hashMapRowGetter); + longCommon = new VectorMapJoinOptimizedLongCommon(minMaxEnabled, isOuterJoin, hashTableKeyType); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedLongHashMultiSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedLongHashMultiSet.java new file mode 100644 index 0000000..5fb8c3a --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedLongHashMultiSet.java @@ -0,0 +1,83 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.exec.JoinUtil.JoinResult; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMultiSetResult; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinLongHashMultiSet; +import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKeyType; + +/* + * A single long key hash multi-set based on the BytesBytesMultiHashMap. + * + * We serialize the long key into BinarySortable format into an output buffer accepted by + * BytesBytesMultiHashMap. + */ +public class VectorMapJoinOptimizedLongHashMultiSet + extends VectorMapJoinOptimizedHashMultiSet + implements VectorMapJoinLongHashMultiSet { + + private VectorMapJoinOptimizedLongCommon longCommon; + + @Override + public boolean useMinMax() { + return longCommon.useMinMax(); + } + + @Override + public long min() { + return longCommon.min(); + } + + @Override + public long max() { + return longCommon.max(); + } + + /* + @Override + public void putRow(BytesWritable currentKey, BytesWritable currentValue) + throws SerDeException, HiveException, IOException { + + longCommon.adaptPutRow((VectorMapJoinOptimizedHashTable) this, currentKey, currentValue); + } + */ + + @Override + public JoinResult contains(long key, + VectorMapJoinHashMultiSetResult hashMultiSetResult) throws IOException { + + SerializedBytes serializedBytes = longCommon.serialize(key); + + return super.contains(serializedBytes.bytes, serializedBytes.offset, serializedBytes.length, + hashMultiSetResult); + + } + + public VectorMapJoinOptimizedLongHashMultiSet( + boolean minMaxEnabled, boolean isOuterJoin, HashTableKeyType hashTableKeyType, + MapJoinTableContainer originalTableContainer, ReusableGetAdaptor 
hashMapRowGetter) { + super(originalTableContainer, hashMapRowGetter); + longCommon = new VectorMapJoinOptimizedLongCommon(minMaxEnabled, isOuterJoin, hashTableKeyType); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedLongHashSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedLongHashSet.java new file mode 100644 index 0000000..c41505a --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedLongHashSet.java @@ -0,0 +1,83 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.exec.JoinUtil.JoinResult; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashSetResult; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinLongHashSet; +import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKeyType; + +/* + * A single long key hash set based on the BytesBytesMultiHashMap. + * + * We serialize the long key into BinarySortable format into an output buffer accepted by + * BytesBytesMultiHashMap. + */ +public class VectorMapJoinOptimizedLongHashSet + extends VectorMapJoinOptimizedHashSet + implements VectorMapJoinLongHashSet { + + private VectorMapJoinOptimizedLongCommon longCommon; + + @Override + public boolean useMinMax() { + return longCommon.useMinMax(); + } + + @Override + public long min() { + return longCommon.min(); + } + + @Override + public long max() { + return longCommon.max(); + } + + /* + @Override + public void putRow(BytesWritable currentKey, BytesWritable currentValue) + throws SerDeException, HiveException, IOException { + + longCommon.adaptPutRow((VectorMapJoinOptimizedHashTable) this, currentKey, currentValue); + } + */ + + @Override + public JoinResult contains(long key, + VectorMapJoinHashSetResult hashSetResult) throws IOException { + + SerializedBytes serializedBytes = longCommon.serialize(key); + + return super.contains(serializedBytes.bytes, serializedBytes.offset, serializedBytes.length, + hashSetResult); + + } + + public VectorMapJoinOptimizedLongHashSet( + boolean minMaxEnabled, boolean isOuterJoin, HashTableKeyType hashTableKeyType, + MapJoinTableContainer originalTableContainer, ReusableGetAdaptor hashMapRowGetter) { + super(originalTableContainer, 
hashMapRowGetter); + longCommon = new VectorMapJoinOptimizedLongCommon(minMaxEnabled, isOuterJoin, hashTableKeyType); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedMultiKeyHashMap.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedMultiKeyHashMap.java new file mode 100644 index 0000000..4f3e20e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedMultiKeyHashMap.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized; + +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor; + +/* + * An multi-key hash map based on the BytesBytesMultiHashMap. + */ +public class VectorMapJoinOptimizedMultiKeyHashMap + extends VectorMapJoinOptimizedHashMap { + + // UNDONE: How to look for all NULLs in a multi-key????? Let nulls through for now. 
+ + public VectorMapJoinOptimizedMultiKeyHashMap(boolean isOuterJoin, + MapJoinTableContainer originalTableContainer, ReusableGetAdaptor hashMapRowGetter) { + super(originalTableContainer, hashMapRowGetter); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedMultiKeyHashMultiSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedMultiKeyHashMultiSet.java new file mode 100644 index 0000000..b95a2dd --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedMultiKeyHashMultiSet.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized; + +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor; + +/* + * An multi-key hash map based on the BytesBytesMultiHashMultiSet. + */ +public class VectorMapJoinOptimizedMultiKeyHashMultiSet + extends VectorMapJoinOptimizedHashMultiSet { + + // UNDONE: How to look for all NULLs in a multi-key????? 
Let nulls through for now. + + public VectorMapJoinOptimizedMultiKeyHashMultiSet(boolean isOuterJoin, + MapJoinTableContainer originalTableContainer, ReusableGetAdaptor hashMapRowGetter) { + super(originalTableContainer, hashMapRowGetter); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedMultiKeyHashSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedMultiKeyHashSet.java new file mode 100644 index 0000000..35ecc2a --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedMultiKeyHashSet.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized; + +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor; + +/* + * An multi-key hash map based on the BytesBytesMultiHashSet. + */ +public class VectorMapJoinOptimizedMultiKeyHashSet + extends VectorMapJoinOptimizedHashSet { + + // UNDONE: How to look for all NULLs in a multi-key????? 
Let nulls through for now. + + public VectorMapJoinOptimizedMultiKeyHashSet(boolean isOuterJoin, + MapJoinTableContainer originalTableContainer, ReusableGetAdaptor hashMapRowGetter) { + super(originalTableContainer, hashMapRowGetter); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedStringCommon.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedStringCommon.java new file mode 100644 index 0000000..39c2d49 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedStringCommon.java @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized.VectorMapJoinOptimizedHashTable.SerializedBytes; +import org.apache.hadoop.hive.serde2.ByteStream.Output; +import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableSerializeWrite; + +/* + * An single byte array value hash map based on the BytesBytesMultiHashMap. 
+ * + * Since BytesBytesMultiHashMap does not interpret the key as BinarySortable we optimize + * this case and just reference the byte array key directly for the lookup instead of serializing + * the byte array into BinarySortable. We rely on it just doing byte array equality comparisons. + */ +public class VectorMapJoinOptimizedStringCommon { + + // private boolean isOuterJoin; + + // private BinarySortableDeserializeRead keyBinarySortableDeserializeRead; + + // private ReadStringResults readStringResults; + + private BinarySortableSerializeWrite keyBinarySortableSerializeWrite; + + private transient Output output; + + private transient SerializedBytes serializedBytes; + + /* + private BytesWritable bytesWritable; + + public void adaptPutRow(VectorMapJoinOptimizedHashTable hashTable, + BytesWritable currentKey, BytesWritable currentValue) + throws SerDeException, HiveException, IOException { + + byte[] keyBytes = currentKey.getBytes(); + int keyLength = currentKey.getLength(); + keyBinarySortableDeserializeRead.set(keyBytes, 0, keyLength); + if (keyBinarySortableDeserializeRead.readCheckNull()) { + if (isOuterJoin) { + return; + } else { + // For inner join, we expect all NULL values to have been filtered out before now. 
+ throw new HiveException("Unexpected NULL"); + } + } + keyBinarySortableDeserializeRead.readString(readStringResults); + + bytesWritable.set(readStringResults.bytes, readStringResults.start, readStringResults.length); + + hashTable.putRowInternal(bytesWritable, currentValue); + } + */ + + public SerializedBytes serialize(byte[] keyBytes, int keyStart, int keyLength) throws IOException { + + keyBinarySortableSerializeWrite.reset(); + keyBinarySortableSerializeWrite.writeString(keyBytes, keyStart, keyLength); + + serializedBytes.bytes = output.getData(); + serializedBytes.offset = 0; + serializedBytes.length = output.getLength(); + + return serializedBytes; + + } + + public VectorMapJoinOptimizedStringCommon(boolean isOuterJoin) { + // this.isOuterJoin = isOuterJoin; + // PrimitiveTypeInfo[] primitiveTypeInfos = { TypeInfoFactory.stringTypeInfo }; + // keyBinarySortableDeserializeRead = new BinarySortableDeserializeRead(primitiveTypeInfos); + // readStringResults = keyBinarySortableDeserializeRead.createReadStringResults(); + // bytesWritable = new BytesWritable(); + keyBinarySortableSerializeWrite = new BinarySortableSerializeWrite(1); + output = new Output(); + keyBinarySortableSerializeWrite.set(output); + serializedBytes = new SerializedBytes(); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedStringHashMap.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedStringHashMap.java new file mode 100644 index 0000000..220c05e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedStringHashMap.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.exec.JoinUtil.JoinResult; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashMap; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMapResult; + +/* + * An multi-key hash map based on the BytesBytesMultiHashMap. 
+ */ +public class VectorMapJoinOptimizedStringHashMap + extends VectorMapJoinOptimizedHashMap + implements VectorMapJoinBytesHashMap { + + private VectorMapJoinOptimizedStringCommon stringCommon; + + /* + @Override + public void putRow(BytesWritable currentKey, BytesWritable currentValue) + throws SerDeException, HiveException, IOException { + + stringCommon.adaptPutRow((VectorMapJoinOptimizedHashTable) this, currentKey, currentValue); + } + */ + + @Override + public JoinResult lookup(byte[] keyBytes, int keyStart, int keyLength, + VectorMapJoinHashMapResult hashMapResult) throws IOException { + + SerializedBytes serializedBytes = stringCommon.serialize(keyBytes, keyStart, keyLength); + + return super.lookup(serializedBytes.bytes, serializedBytes.offset, serializedBytes.length, + hashMapResult); + + } + + public VectorMapJoinOptimizedStringHashMap(boolean isOuterJoin, + MapJoinTableContainer originalTableContainer, ReusableGetAdaptor hashMapRowGetter) { + super(originalTableContainer, hashMapRowGetter); + stringCommon = new VectorMapJoinOptimizedStringCommon(isOuterJoin); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedStringHashMultiSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedStringHashMultiSet.java new file mode 100644 index 0000000..b6c6958 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedStringHashMultiSet.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.exec.JoinUtil.JoinResult; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashMultiSet; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMultiSetResult; + +/* + * An multi-key hash map based on the BytesBytesMultiHashMultiSet. 
+ */ +public class VectorMapJoinOptimizedStringHashMultiSet + extends VectorMapJoinOptimizedHashMultiSet + implements VectorMapJoinBytesHashMultiSet { + + private VectorMapJoinOptimizedStringCommon stringCommon; + + /* + @Override + public void putRow(BytesWritable currentKey, BytesWritable currentValue) + throws SerDeException, HiveException, IOException { + + stringCommon.adaptPutRow((VectorMapJoinOptimizedHashTable) this, currentKey, currentValue); + } + */ + + @Override + public JoinResult contains(byte[] keyBytes, int keyStart, int keyLength, + VectorMapJoinHashMultiSetResult hashMultiSetResult) throws IOException { + + SerializedBytes serializedBytes = stringCommon.serialize(keyBytes, keyStart, keyLength); + + return super.contains(serializedBytes.bytes, serializedBytes.offset, serializedBytes.length, + hashMultiSetResult); + + + } + + public VectorMapJoinOptimizedStringHashMultiSet(boolean isOuterJoin, + MapJoinTableContainer originalTableContainer, ReusableGetAdaptor hashMapRowGetter) { + super(originalTableContainer, hashMapRowGetter); + stringCommon = new VectorMapJoinOptimizedStringCommon(isOuterJoin); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedStringHashSet.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedStringHashSet.java new file mode 100644 index 0000000..f921b9c --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedStringHashSet.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.optimized; + +import java.io.IOException; + +import org.apache.hadoop.hive.ql.exec.JoinUtil.JoinResult; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashSet; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashSetResult; + +/* + * An multi-key hash map based on the BytesBytesMultiHashSet. 
+ */ +public class VectorMapJoinOptimizedStringHashSet + extends VectorMapJoinOptimizedHashSet + implements VectorMapJoinBytesHashSet { + + private VectorMapJoinOptimizedStringCommon stringCommon; + + /* + @Override + public void putRow(BytesWritable currentKey, BytesWritable currentValue) + throws SerDeException, HiveException, IOException { + + stringCommon.adaptPutRow((VectorMapJoinOptimizedHashTable) this, currentKey, currentValue); + } + */ + + @Override + public JoinResult contains(byte[] keyBytes, int keyStart, int keyLength, + VectorMapJoinHashSetResult hashSetResult) throws IOException { + + SerializedBytes serializedBytes = stringCommon.serialize(keyBytes, keyStart, keyLength); + + return super.contains(serializedBytes.bytes, serializedBytes.offset, serializedBytes.length, + hashSetResult); + + } + + public VectorMapJoinOptimizedStringHashSet(boolean isOuterJoin, + MapJoinTableContainer originalTableContainer, ReusableGetAdaptor hashMapRowGetter) { + super(originalTableContainer, hashMapRowGetter); + stringCommon = new VectorMapJoinOptimizedStringCommon(isOuterJoin); + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java index 319aacb..615fd8a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java @@ -21,7 +21,6 @@ import java.io.Serializable; import java.util.ArrayList; import java.util.Arrays; -import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; @@ -30,7 +29,6 @@ import java.util.Properties; import java.util.Set; import java.util.Stack; -import java.util.TreeMap; import java.util.regex.Pattern; import org.apache.commons.logging.Log; @@ -39,10 +37,23 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import 
org.apache.hadoop.hive.ql.exec.*; import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; +import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey; import org.apache.hadoop.hive.ql.exec.spark.SparkTask; import org.apache.hadoop.hive.ql.exec.tez.TezTask; import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; import org.apache.hadoop.hive.ql.exec.vector.VectorGroupByOperator; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinInnerBigOnlyLongOperator; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinInnerBigOnlyMultiKeyOperator; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinInnerBigOnlyStringOperator; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinInnerLongOperator; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinInnerMultiKeyOperator; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinInnerStringOperator; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinLeftSemiLongOperator; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinLeftSemiMultiKeyOperator; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinLeftSemiStringOperator; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinOuterLongOperator; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinOuterMultiKeyOperator; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinOuterStringOperator; import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; import org.apache.hadoop.hive.ql.exec.vector.VectorizationContextRegion; import org.apache.hadoop.hive.ql.exec.vector.VectorizedInputFormatInterface; @@ -68,6 +79,7 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.ql.plan.GroupByDesc; +import org.apache.hadoop.hive.ql.plan.JoinDesc; import org.apache.hadoop.hive.ql.plan.MapJoinDesc; import 
org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; @@ -78,6 +90,10 @@ import org.apache.hadoop.hive.ql.plan.TableScanDesc; import org.apache.hadoop.hive.ql.plan.TezWork; import org.apache.hadoop.hive.ql.plan.VectorGroupByDesc; +import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc; +import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableImplementationType; +import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKeyType; +import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKind; import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.ql.udf.UDFAcos; import org.apache.hadoop.hive.ql.udf.UDFAsin; @@ -313,7 +329,7 @@ public Object dispatch(Node nd, Stack stack, Object... nodeOutputs) // We are only vectorizing Reduce under Tez. if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_VECTORIZATION_REDUCE_ENABLED)) { - convertReduceWork((ReduceWork) w); + convertReduceWork((ReduceWork) w, true); } } } @@ -325,7 +341,7 @@ public Object dispatch(Node nd, Stack stack, Object... nodeOutputs) } else if (baseWork instanceof ReduceWork && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_VECTORIZATION_REDUCE_ENABLED)) { - convertReduceWork((ReduceWork) baseWork); + convertReduceWork((ReduceWork) baseWork, false); } } } @@ -335,7 +351,7 @@ public Object dispatch(Node nd, Stack stack, Object... 
nodeOutputs) private void convertMapWork(MapWork mapWork, boolean isTez) throws SemanticException { boolean ret = validateMapWork(mapWork, isTez); if (ret) { - vectorizeMapWork(mapWork); + vectorizeMapWork(mapWork, isTez); } } @@ -391,11 +407,11 @@ private boolean validateMapWork(MapWork mapWork, boolean isTez) throws SemanticE return true; } - private void vectorizeMapWork(MapWork mapWork) throws SemanticException { + private void vectorizeMapWork(MapWork mapWork, boolean isTez) throws SemanticException { LOG.info("Vectorizing MapWork..."); mapWork.setVectorMode(true); Map opRules = new LinkedHashMap(); - MapWorkVectorizationNodeProcessor vnp = new MapWorkVectorizationNodeProcessor(mapWork); + MapWorkVectorizationNodeProcessor vnp = new MapWorkVectorizationNodeProcessor(mapWork, isTez); addMapWorkRules(opRules, vnp); Dispatcher disp = new DefaultRuleDispatcher(vnp, opRules, null); GraphWalker ogw = new PreOrderWalker(disp); @@ -416,10 +432,10 @@ private void vectorizeMapWork(MapWork mapWork) throws SemanticException { return; } - private void convertReduceWork(ReduceWork reduceWork) throws SemanticException { + private void convertReduceWork(ReduceWork reduceWork, boolean isTez) throws SemanticException { boolean ret = validateReduceWork(reduceWork); if (ret) { - vectorizeReduceWork(reduceWork); + vectorizeReduceWork(reduceWork, isTez); } } @@ -497,7 +513,7 @@ private boolean validateReduceWork(ReduceWork reduceWork) throws SemanticExcepti return true; } - private void vectorizeReduceWork(ReduceWork reduceWork) throws SemanticException { + private void vectorizeReduceWork(ReduceWork reduceWork, boolean isTez) throws SemanticException { LOG.info("Vectorizing ReduceWork..."); reduceWork.setVectorMode(true); @@ -506,7 +522,7 @@ private void vectorizeReduceWork(ReduceWork reduceWork) throws SemanticException // VectorizationContext... Do we use PreOrderWalker instead of DefaultGraphWalker. 
Map opRules = new LinkedHashMap(); ReduceWorkVectorizationNodeProcessor vnp = - new ReduceWorkVectorizationNodeProcessor(reduceColumnNames, reduceTypeInfos); + new ReduceWorkVectorizationNodeProcessor(reduceColumnNames, reduceTypeInfos, isTez); addReduceWorkRules(opRules, vnp); Dispatcher disp = new DefaultRuleDispatcher(vnp, opRules, null); GraphWalker ogw = new PreOrderWalker(disp); @@ -645,11 +661,11 @@ public VectorizationContext walkStackToFindVectorizationContext(Stack stac } public Operator doVectorize(Operator op, - VectorizationContext vContext) throws SemanticException { + VectorizationContext vContext, boolean isTez) throws SemanticException { Operator vectorOp = op; try { if (!opsDone.contains(op)) { - vectorOp = vectorizeOperator(op, vContext); + vectorOp = vectorizeOperator(op, vContext, isTez); opsDone.add(op); if (vectorOp != op) { opToVectorOpMap.put(op, vectorOp); @@ -672,10 +688,12 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, class MapWorkVectorizationNodeProcessor extends VectorizationNodeProcessor { private final MapWork mWork; + private final boolean isTez; - public MapWorkVectorizationNodeProcessor(MapWork mWork) { + public MapWorkVectorizationNodeProcessor(MapWork mWork, boolean isTez) { super(); this.mWork = mWork; + this.isTez = isTez; } @Override @@ -714,7 +732,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, return null; } - Operator vectorOp = doVectorize(op, vContext); + Operator vectorOp = doVectorize(op, vContext, isTez); if (LOG.isDebugEnabled()) { if (vectorOp instanceof VectorizationContextRegion) { @@ -733,6 +751,8 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, private final List reduceColumnNames; private final List reduceTypeInfos; + private boolean isTez; + private Operator rootVectorOp; public Operator getRootVectorOp() { @@ -740,11 +760,12 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } public 
ReduceWorkVectorizationNodeProcessor(List reduceColumnNames, - List reduceTypeInfos) { + List reduceTypeInfos, boolean isTez) { super(); this.reduceColumnNames = reduceColumnNames; this.reduceTypeInfos = reduceTypeInfos; rootVectorOp = null; + this.isTez = isTez; } @Override @@ -795,7 +816,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, return null; } - Operator vectorOp = doVectorize(op, vContext); + Operator vectorOp = doVectorize(op, vContext, isTez); if (LOG.isDebugEnabled()) { if (vectorOp instanceof VectorizationContextRegion) { @@ -1304,12 +1325,278 @@ private void fixupParentChildOperators(Operator op, } } + private boolean isBigTableOnlyResults(MapJoinDesc desc) { + Byte[] order = desc.getTagOrder(); + byte posBigTable = (byte) desc.getPosBigTable(); + Byte posSingleVectorMapJoinSmallTable = (order[0] == posBigTable ? order[1] : order[0]); + + int[] smallTableIndices; + int smallTableIndicesSize; + List smallTableExprs = desc.getExprs().get(posSingleVectorMapJoinSmallTable); + if (desc.getValueIndices() != null && desc.getValueIndices().get(posSingleVectorMapJoinSmallTable) != null) { + smallTableIndices = desc.getValueIndices().get(posSingleVectorMapJoinSmallTable); + LOG.info("Vectorizer isBigTableOnlyResults smallTableIndices " + Arrays.toString(smallTableIndices)); + smallTableIndicesSize = smallTableIndices.length; + } else { + smallTableIndices = null; + LOG.info("Vectorizer isBigTableOnlyResults smallTableIndices EMPTY"); + smallTableIndicesSize = 0; + } + + List smallTableRetainList = desc.getRetainList().get(posSingleVectorMapJoinSmallTable); + LOG.info("Vectorizer isBigTableOnlyResults smallTableRetainList " + smallTableRetainList); + int smallTableRetainSize = smallTableRetainList.size(); + + if (smallTableIndicesSize > 0) { + // Small table indices has priority over retain. 
+ for (int i = 0; i < smallTableIndicesSize; i++) { + if (smallTableIndices[i] < 0) { + // Negative numbers indicate a column to be (deserialize) read from the small table's + // LazyBinary value row. + LOG.info("Vectorizer isBigTableOnlyResults smallTableIndices[i] < 0 returning false"); + return false; + } + } + } else if (smallTableRetainSize > 0) { + LOG.info("Vectorizer isBigTableOnlyResults smallTableRetainSize > 0 returning false"); + return false; + } + + LOG.info("Vectorizer isBigTableOnlyResults returning true"); + return true; + } + + Operator specializeMapJoinOperator(Operator op, + VectorizationContext vContext, MapJoinDesc desc) throws HiveException { + Operator vectorOp = null; + Class> opClass = null; + + boolean isOuterJoin = !desc.getNoOuterJoin(); + + VectorMapJoinDesc.HashTableImplementationType hashTableImplementationType = HashTableImplementationType.NONE; + VectorMapJoinDesc.HashTableKind hashTableKind = HashTableKind.NONE; + VectorMapJoinDesc.HashTableKeyType hashTableKeyType = HashTableKeyType.NONE; + + if (HiveConf.getBoolVar(hiveConf, + HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED)) { + hashTableImplementationType = HashTableImplementationType.FAST; + } else { + // Restrict to using BytesBytesMultiHashMap via MapJoinBytesTableContainer or + // HybridHashTableContainer. + hashTableImplementationType = HashTableImplementationType.OPTIMIZED; + } + + int joinType = desc.getConds()[0].getType(); + + boolean isInnerBigOnly = false; + if (joinType == JoinDesc.INNER_JOIN && isBigTableOnlyResults(desc)) { + isInnerBigOnly = true; + } + + // By default, we can always use the multi-key class. + hashTableKeyType = HashTableKeyType.MULTI_KEY; + + if (!HiveConf.getBoolVar(hiveConf, + HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_MULTIKEY_ONLY_ENABLED)) { + + // Look for single column optimization. 
+ byte posBigTable = (byte) desc.getPosBigTable(); + Map> keyExprs = desc.getKeys(); + List bigTableKeyExprs = keyExprs.get(posBigTable); + if (bigTableKeyExprs.size() == 1) { + String typeName = bigTableKeyExprs.get(0).getTypeString(); + LOG.info("Vectorizer vectorizeOperator map join typeName " + typeName); + if (typeName.equals("boolean")) { + hashTableKeyType = HashTableKeyType.BOOLEAN; + } else if (typeName.equals("tinyint")) { + hashTableKeyType = HashTableKeyType.BYTE; + } else if (typeName.equals("smallint")) { + hashTableKeyType = HashTableKeyType.SHORT; + } else if (typeName.equals("int")) { + hashTableKeyType = HashTableKeyType.INT; + } else if (typeName.equals("bigint") || typeName.equals("long")) { + hashTableKeyType = HashTableKeyType.LONG; + } else if (VectorizationContext.isStringFamily(typeName)) { + hashTableKeyType = HashTableKeyType.STRING; + } + } + } + + switch (joinType) { + case JoinDesc.INNER_JOIN: + if (!isInnerBigOnly) { + hashTableKind = HashTableKind.HASH_MAP; + } else { + hashTableKind = HashTableKind.HASH_MULTISET; + } + break; + case JoinDesc.LEFT_OUTER_JOIN: + case JoinDesc.RIGHT_OUTER_JOIN: + hashTableKind = HashTableKind.HASH_MAP; + break; + case JoinDesc.LEFT_SEMI_JOIN: + hashTableKind = HashTableKind.HASH_SET; + break; + default: + throw new HiveException("Unknown join type " + joinType); + } + + LOG.info("Vectorizer vectorizeOperator map join hashTableKind " + hashTableKind.name() + " hashTableKeyType " + hashTableKeyType.name()); + + switch (hashTableKeyType) { + case BOOLEAN: + case BYTE: + case SHORT: + case INT: + case LONG: + switch (joinType) { + case JoinDesc.INNER_JOIN: + if (!isInnerBigOnly) { + opClass = VectorMapJoinInnerLongOperator.class; + } else { + opClass = VectorMapJoinInnerBigOnlyLongOperator.class; + } + break; + case JoinDesc.LEFT_OUTER_JOIN: + case JoinDesc.RIGHT_OUTER_JOIN: + opClass = VectorMapJoinOuterLongOperator.class; + break; + case JoinDesc.LEFT_SEMI_JOIN: + opClass = 
VectorMapJoinLeftSemiLongOperator.class; + break; + default: + throw new HiveException("Unknown join type " + joinType); + } + break; + case STRING: + switch (joinType) { + case JoinDesc.INNER_JOIN: + if (!isInnerBigOnly) { + opClass = VectorMapJoinInnerStringOperator.class; + } else { + opClass = VectorMapJoinInnerBigOnlyStringOperator.class; + } + break; + case JoinDesc.LEFT_OUTER_JOIN: + case JoinDesc.RIGHT_OUTER_JOIN: + opClass = VectorMapJoinOuterStringOperator.class; + break; + case JoinDesc.LEFT_SEMI_JOIN: + opClass = VectorMapJoinLeftSemiStringOperator.class; + break; + default: + throw new HiveException("Unknown join type " + joinType); + } + break; + case MULTI_KEY: + switch (joinType) { + case JoinDesc.INNER_JOIN: + if (!isInnerBigOnly) { + opClass = VectorMapJoinInnerMultiKeyOperator.class; + } else { + opClass = VectorMapJoinInnerBigOnlyMultiKeyOperator.class; + } + break; + case JoinDesc.LEFT_OUTER_JOIN: + case JoinDesc.RIGHT_OUTER_JOIN: + opClass = VectorMapJoinOuterMultiKeyOperator.class; + break; + case JoinDesc.LEFT_SEMI_JOIN: + opClass = VectorMapJoinLeftSemiMultiKeyOperator.class; + break; + default: + throw new HiveException("Unknown join type " + joinType); + } + break; + } + + vectorOp = OperatorFactory.getVectorOperator(opClass, op.getConf(), vContext); + LOG.info("Vectorizer vectorizeOperator map join class " + vectorOp.getClass().getSimpleName()); + + boolean minMaxEnabled = HiveConf.getBoolVar(hiveConf, + HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_MINMAX_ENABLED); + + VectorMapJoinDesc vectorDesc = desc.getVectorDesc(); + vectorDesc.setHashTableImplementationType(hashTableImplementationType); + vectorDesc.setHashTableKind(hashTableKind); + vectorDesc.setHashTableKeyType(hashTableKeyType); + vectorDesc.setMinMaxEnabled(minMaxEnabled); + return vectorOp; + } + + private boolean canSpecializeMapJoin(Operator op, MapJoinDesc desc, + boolean isTez) { + + boolean specialize = false; + + if (op instanceof MapJoinOperator && + 
HiveConf.getBoolVar(hiveConf, + HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_ENABLED)) { + + // Currently, only under Tez and non-N-way joins. + if (isTez && desc.getConds().length == 1) { + + // Ok, all basic restrictions satisfied so far... + specialize = true; + + if (!HiveConf.getBoolVar(hiveConf, + HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED)) { + + // We are using the optimized hash table we have further + // restrictions (using optimized and key type). + + if (!HiveConf.getBoolVar(hiveConf, + HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDTABLE)) { + specialize = false; + } else { + byte posBigTable = (byte) desc.getPosBigTable(); + Map> keyExprs = desc.getKeys(); + List bigTableKeyExprs = keyExprs.get(posBigTable); + for (ExprNodeDesc exprNodeDesc : bigTableKeyExprs) { + String typeName = exprNodeDesc.getTypeString(); + if (!MapJoinKey.isSupportedField(typeName)) { + specialize = false; + break; + } + } + } + } else { + + // With the fast hash table implementation, we currently do not support + // Hybrid Grace Hash Join. + + if (HiveConf.getBoolVar(hiveConf, + HiveConf.ConfVars.HIVEUSEHYBRIDGRACEHASHJOIN)) { + specialize = false; + } + } + } + } + return specialize; + } + Operator vectorizeOperator(Operator op, - VectorizationContext vContext) throws HiveException { + VectorizationContext vContext, boolean isTez) throws HiveException { Operator vectorOp = null; switch (op.getType()) { case MAPJOIN: + { + MapJoinDesc desc = (MapJoinDesc) op.getConf(); + boolean specialize = canSpecializeMapJoin(op, desc, isTez); + + if (!specialize) { + vectorOp = OperatorFactory.getVectorOperator(desc, vContext); + } else { + + // TEMPORARY Until Native Vector Map Join with Hybrid passes tests... 
+ // HiveConf.setBoolVar(physicalContext.getConf(), + // HiveConf.ConfVars.HIVEUSEHYBRIDGRACEHASHJOIN, false); + + vectorOp = specializeMapJoinOperator(op, vContext, desc); + } + } + break; case GROUPBY: case FILTER: case SELECT: @@ -1325,6 +1612,9 @@ private void fixupParentChildOperators(Operator op, break; } + LOG.info("vectorizeOperator " + (vectorOp == null ? "NULL" : vectorOp.getClass().getName())); + LOG.info("vectorizeOperator " + (vectorOp == null || vectorOp.getConf() == null ? "NULL" : vectorOp.getConf().getClass().getName())); + if (vectorOp != op) { fixupParentChildOperators(op, vectorOp); ((AbstractOperatorDesc) vectorOp.getConf()).setVectorMode(true); diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java index 4f9221e..a342738 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java @@ -60,6 +60,7 @@ public BaseWork(String name) { private String name; // Vectorization. + protected Map vectorColumnNameMap; protected Map vectorColumnTypeMap; protected Map vectorScratchColumnTypeMap; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java index c2c1b95..0192fb5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java @@ -72,12 +72,17 @@ private boolean isHybridHashJoin; + // Extra parameters only for vectorization. 
+ private VectorMapJoinDesc vectorDesc; + public MapJoinDesc() { + vectorDesc = new VectorMapJoinDesc(); bigTableBucketNumMapping = new LinkedHashMap(); } public MapJoinDesc(MapJoinDesc clone) { super(clone); + vectorDesc = new VectorMapJoinDesc(clone.vectorDesc); this.keys = clone.keys; this.keyTblDesc = clone.keyTblDesc; this.valueTblDescs = clone.valueTblDescs; @@ -102,6 +107,7 @@ public MapJoinDesc(final Map> keys, final int posBigTable, final JoinCondDesc[] conds, final Map> filters, boolean noOuterJoin, String dumpFilePrefix) { super(values, outputColumnNames, noOuterJoin, conds, filters, null); + vectorDesc = new VectorMapJoinDesc(); this.keys = keys; this.keyTblDesc = keyTblDesc; this.valueTblDescs = valueTblDescs; @@ -112,6 +118,14 @@ public MapJoinDesc(final Map> keys, initRetainExprList(); } + public void setVectorDesc(VectorMapJoinDesc vectorDesc) { + this.vectorDesc = vectorDesc; + } + + public VectorMapJoinDesc getVectorDesc() { + return vectorDesc; + } + private void initRetainExprList() { retainList = new HashMap>(); Set>> set = super.getExprs().entrySet(); diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinDesc.java new file mode 100644 index 0000000..e1bf1f4 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinDesc.java @@ -0,0 +1,107 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +/** + * VectorGroupByDesc. + * + * Extra parameters beyond MapJoinDesc just for the vector map join operators. + * + * We don't extend MapJoinDesc because the base OperatorDesc doesn't support + * clone and adding it is a lot work for little gain. + */ +public class VectorMapJoinDesc extends AbstractVectorDesc { + + private static long serialVersionUID = 1L; + + public static enum HashTableImplementationType { + NONE, + OPTIMIZED, + FAST + } + + public static enum HashTableKind { + NONE, + HASH_SET, + HASH_MULTISET, + HASH_MAP + } + + public static enum HashTableKeyType { + NONE, + BOOLEAN, + BYTE, + SHORT, + INT, + LONG, + STRING, + MULTI_KEY + } + + private HashTableImplementationType hashTableImplementationType; + private HashTableKind hashTableKind; + private HashTableKeyType hashTableKeyType; + private boolean minMaxEnabled; + + public VectorMapJoinDesc() { + hashTableImplementationType = HashTableImplementationType.NONE; + hashTableKind = HashTableKind.NONE; + hashTableKeyType = HashTableKeyType.NONE; + minMaxEnabled = false; + } + + public VectorMapJoinDesc(VectorMapJoinDesc clone) { + this.hashTableImplementationType = clone.hashTableImplementationType; + this.hashTableKind = clone.hashTableKind; + this.hashTableKeyType = clone.hashTableKeyType; + this.minMaxEnabled = clone.minMaxEnabled; + } + + public HashTableImplementationType hashTableImplementationType() { + return hashTableImplementationType; + } + + public void setHashTableImplementationType(HashTableImplementationType 
hashTableImplementationType) { + this.hashTableImplementationType = hashTableImplementationType; + } + + public HashTableKind hashTableKind() { + return hashTableKind; + } + + public void setHashTableKind(HashTableKind hashTableKind) { + this.hashTableKind = hashTableKind; + } + + public HashTableKeyType hashTableKeyType() { + return hashTableKeyType; + } + + public void setHashTableKeyType(HashTableKeyType hashTableKeyType) { + this.hashTableKeyType = hashTableKeyType; + } + + public boolean minMaxEnabled() { + return minMaxEnabled; + } + + public void setMinMaxEnabled(boolean minMaxEnabled) { + this.minMaxEnabled = minMaxEnabled; + } +} diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/TestDebugDisplay.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/TestDebugDisplay.java new file mode 100644 index 0000000..2a4f409 --- /dev/null +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/TestDebugDisplay.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin; + +import junit.framework.TestCase; + +/** + * Unit test for the vectorized conversion to and from row object[]. 
+ */ +public class TestDebugDisplay extends TestCase { + + public void testDebugDisplay() throws Throwable { + + try { + String result; + int[] test0 = {}; + result = VectorMapJoinGenerateResultOperator.intArrayToRangesString(test0, test0.length); + System.out.println(result); + int[] test1 = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31}; + result = VectorMapJoinGenerateResultOperator.intArrayToRangesString(test1, test1.length); + System.out.println(result); + int[] test2 = {5}; + result = VectorMapJoinGenerateResultOperator.intArrayToRangesString(test2, test2.length); + System.out.println(result); + int[] test3 = {4,4}; + result = VectorMapJoinGenerateResultOperator.intArrayToRangesString(test3, test3.length); + System.out.println(result); + int[] test4 = {0,1,2,3,4,5,6,6,7,7,8}; + result = VectorMapJoinGenerateResultOperator.intArrayToRangesString(test4, test4.length); + System.out.println(result); + int[] test5 = {0,0,1}; + result = VectorMapJoinGenerateResultOperator.intArrayToRangesString(test5, test5.length); + System.out.println(result); + int[] test6 = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,31}; + result = VectorMapJoinGenerateResultOperator.intArrayToRangesString(test6, test6.length); + System.out.println(result); + int[] test7 = {4,2}; + result = VectorMapJoinGenerateResultOperator.intArrayToRangesString(test7, test7.length); + System.out.println(result); + + + } catch (Throwable e) { + e.printStackTrace(); + throw e; + } + } +} \ No newline at end of file diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/CommonFastHashTable.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/CommonFastHashTable.java new file mode 100644 index 0000000..c2375e0 --- /dev/null +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/CommonFastHashTable.java @@ -0,0 +1,128 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + 
* or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; + +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMapResult; +import org.apache.hadoop.hive.serde2.WriteBuffers; + +import static org.junit.Assert.*; + +public class CommonFastHashTable { + + protected static final float LOAD_FACTOR = 0.75f; + protected static final int CAPACITY = 8; + protected static final int WB_SIZE = 128; // Make sure we cross some buffer boundaries... 
+ protected static final int MODERATE_WB_SIZE = 8 * 1024; + protected static final int MODERATE_CAPACITY = 512; + protected static final int LARGE_WB_SIZE = 1024 * 1024; + protected static final int LARGE_CAPACITY = 8388608; + protected static Random random; + + public static int generateLargeCount() { + int count = 0; + if (random.nextInt(100) != 0) { + switch (random.nextInt(5)) { + case 0: + count = 1; + break; + case 1: + count = 2; + break; + case 2: + count = 3; + case 3: + count = 4 + random.nextInt(7); + break; + case 4: + count = 10 + random.nextInt(90); + break; + default: + throw new Error("Missing case"); + } + } else { + switch (random.nextInt(3)) { + case 0: + count = 100 + random.nextInt(900); + break; + case 1: + count = 1000 + random.nextInt(9000); + break; + case 2: + count = 10000 + random.nextInt(90000); + break; + } + } + return count; + } + public static void verifyHashMapResult(VectorMapJoinHashMapResult hashMapResult, + RandomByteArrayStream randomByteArrayStream ) { + + List resultBytes = new ArrayList(); + int count = 0; + if (hashMapResult.hasRows()) { + WriteBuffers.ByteSegmentRef ref = hashMapResult.first(); + while (ref != null) { + count++; + byte[] bytes = ref.getBytes(); + int offset = (int) ref.getOffset(); + int length = ref.getLength(); + resultBytes.add(Arrays.copyOfRange(bytes, offset, offset + length)); + ref = hashMapResult.next(); + } + } else { + assertTrue(hashMapResult.isEof()); + } + if (randomByteArrayStream.size() != count) { + assertTrue(false); + } + + for (int i = 0; i < count; ++i) { + byte[] bytes = resultBytes.get(i); + if (!randomByteArrayStream.contains(bytes)) { + assertTrue(false); + } + } + } + + public static void verifyHashMapResult(VectorMapJoinHashMapResult hashMapResult, + byte[] valueBytes ) { + + assertTrue(hashMapResult.hasRows()); + WriteBuffers.ByteSegmentRef ref = hashMapResult.first(); + byte[] bytes = ref.getBytes(); + int offset = (int) ref.getOffset(); + int length = ref.getLength(); + 
assertTrue(valueBytes.length == length); + boolean match = true; // Assume + for (int j = 0; j < length; j++) { + if (valueBytes[j] != bytes[offset + j]) { + match = false; + break; + } + } + if (!match) { + assertTrue(false); + } + } +} \ No newline at end of file diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/RandomByteArrayStream.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/RandomByteArrayStream.java new file mode 100644 index 0000000..3960272 --- /dev/null +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/RandomByteArrayStream.java @@ -0,0 +1,92 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +public class RandomByteArrayStream { + + private Random random; + private int min; + + private List byteArrays; + + public RandomByteArrayStream(Random random) { + this.random = random; + byteArrays = new ArrayList(); + min = 1; + } + + public RandomByteArrayStream(Random random, int min) { + this.random = random; + byteArrays = new ArrayList(); + this.min = min; + } + + public byte[] next() { + int category = random.nextInt(100); + int count = 0; + if (category < 98) { + count = min + random.nextInt(10); + } else { + switch (category - 98) { + case 0: + count = Math.max(min, 10) + random.nextInt(90); + break; + case 1: + count = Math.max(min, 100) + random.nextInt(900); + } + } + byte[] bytes = new byte[count]; + random.nextBytes(bytes); + byteArrays.add(bytes); + return bytes; + } + + public int size() { + return byteArrays.size(); + } + + public byte[] get(int i) { + return byteArrays.get(i); + } + + public boolean contains(byte[] bytes) { + int length = bytes.length; + for (int i = 0; i < byteArrays.size(); i++) { + byte[] streamBytes = byteArrays.get(i); + if (streamBytes.length != length) { + continue; + } + boolean match = true; // Assume + for (int j = 0 ; j < length; j++) { + if (streamBytes[j] != bytes[j]) { + match = false; + break; + } + } + if (match) { + return true; + } + } + return false; + } +} \ No newline at end of file diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/RandomLongStream.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/RandomLongStream.java new file mode 100644 index 0000000..eab5c21 --- /dev/null +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/RandomLongStream.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +public class RandomLongStream { + + private Random random; + + private List longs; + + public RandomLongStream(Random random) { + this.random = random; + longs = new ArrayList(); + } + + public long next() { + long longValue = random.nextLong(); + longs.add(longValue); + return longValue; + } + + public int size() { + return longs.size(); + } + + public long get(int i) { + return longs.get(i); + } +} \ No newline at end of file diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/TestVectorMapJoinFastLongHashMap.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/TestVectorMapJoinFastLongHashMap.java new file mode 100644 index 0000000..eb38b19 --- /dev/null +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/TestVectorMapJoinFastLongHashMap.java @@ -0,0 +1,219 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +import java.util.Random; + +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMapResult; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.VectorMapJoinFastLongHashMap; +import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKeyType; +import org.junit.Test; + +import static org.junit.Assert.*; + +public class TestVectorMapJoinFastLongHashMap extends CommonFastHashTable { + + @Test + public void testPutGetOne() throws Exception { + random = new Random(47496); + + VectorMapJoinFastLongHashMap map = + new VectorMapJoinFastLongHashMap(false, false, HashTableKeyType.LONG, CAPACITY, LOAD_FACTOR, WB_SIZE, 0); + + RandomLongStream randomLongKeyStream = new RandomLongStream(random); + RandomByteArrayStream randomByteArrayValueStream = new RandomByteArrayStream(random); + + long key = randomLongKeyStream.next(); + byte[] value = randomByteArrayValueStream.next(); + map.putRow(key, value); + verifyHashMapResult(map, key, randomByteArrayValueStream.get(0)); + + key = randomLongKeyStream.next(); + value = randomByteArrayValueStream.next(); + map.putRow(key, value); + verifyHashMapResult(map, key, randomByteArrayValueStream.get(1)); + } + + @Test + public void testPutGetMultiple() throws Exception { + random = new Random(2990); + + 
VectorMapJoinFastLongHashMap map = new VectorMapJoinFastLongHashMap(false, false, HashTableKeyType.LONG, CAPACITY, LOAD_FACTOR, WB_SIZE, 0); + + RandomLongStream randomLongKeyStream = new RandomLongStream(random); + RandomByteArrayStream randomByteArrayValueStream = new RandomByteArrayStream(random); + + long key = randomLongKeyStream.next(); + byte[] value = randomByteArrayValueStream.next(); + map.putRow(key, value); + verifyHashMapResult(map, key, value); + + // Same key, multiple values. + for (int i = 0; i < 3; ++i) { + value = randomByteArrayValueStream.next(); + map.putRow(key, value); + verifyHashMapResult(map, key, randomByteArrayValueStream); + } + } + + @Test + public void testGetNonExistent() throws Exception { + random = new Random(16916); + + VectorMapJoinFastLongHashMap map = new VectorMapJoinFastLongHashMap(false, false, HashTableKeyType.LONG, CAPACITY, LOAD_FACTOR, WB_SIZE, 0); + + RandomLongStream randomLongKeyStream = new RandomLongStream(random); + RandomByteArrayStream randomByteArrayValueStream = new RandomByteArrayStream(random); + + long key = randomLongKeyStream.next(); + byte[] value = randomByteArrayValueStream.next(); + map.putRow(key, value); + + key += 1; + map.putRow(key, value); + + key += 1; + VectorMapJoinHashMapResult hashMapResult = map.createHashMapResult(); + JoinUtil.JoinResult joinResult = map.lookup(key, hashMapResult); + assertTrue(joinResult == JoinUtil.JoinResult.NOMATCH); + assertTrue(!hashMapResult.hasRows()); + } + + @Test + public void testPutWithFullMap() throws Exception { + random = new Random(26078); + + // Make sure the map does not expand; should be able to find space. 
+ VectorMapJoinFastLongHashMap map = new VectorMapJoinFastLongHashMap(false, false, HashTableKeyType.LONG, CAPACITY, 1f, WB_SIZE, 0); + + RandomLongStream randomLongKeyStream = new RandomLongStream(random); + RandomByteArrayStream randomByteArrayValueStream = new RandomByteArrayStream(random); + for (int i = 0; i < CAPACITY; ++i) { + long key = randomLongKeyStream.next(); + byte[] value = randomByteArrayValueStream.next(); + map.putRow(key, value); + } + for (int i = 0; i < randomLongKeyStream.size(); ++i) { + verifyHashMapResult(map, randomLongKeyStream.get(i), randomByteArrayValueStream.get(i)); + } + // assertEquals(CAPACITY, map.getCapacity()); + // Get of non-existent key should terminate.. + long anotherKey = randomLongKeyStream.next(); + VectorMapJoinHashMapResult hashMapResult = map.createHashMapResult(); + JoinUtil.JoinResult joinResult = map.lookup(anotherKey, hashMapResult); + assertTrue(joinResult == JoinUtil.JoinResult.NOMATCH); + } + + @Test + public void testExpand() throws Exception { + random = new Random(22470); + + // Start with capacity 1; make sure we expand on every put. + VectorMapJoinFastLongHashMap map = new VectorMapJoinFastLongHashMap(false, false, HashTableKeyType.LONG, 1, 0.0000001f, WB_SIZE, 0); + + RandomLongStream randomLongKeyStream = new RandomLongStream(random); + RandomByteArrayStream randomByteArrayValueStream = new RandomByteArrayStream(random); + + for (int i = 0; i < 18; ++i) { + long key = randomLongKeyStream.next(); + byte[] value = randomByteArrayValueStream.next(); + map.putRow(key, value); + for (int j = 0; j <= i; ++j) { + verifyHashMapResult(map, randomLongKeyStream.get(j), randomByteArrayValueStream.get(j)); + } + } + // assertEquals(1 << 18, map.getCapacity()); + } + + @Test + public void testLarge() throws Exception { + random = new Random(40719); + + // Use a large capacity that doesn't require expansion, yet. 
+ VectorMapJoinFastLongHashMap map = new VectorMapJoinFastLongHashMap(false, false, HashTableKeyType.LONG, LARGE_CAPACITY, LOAD_FACTOR, LARGE_WB_SIZE, 0); + + RandomLongStream randomLongKeyStream = new RandomLongStream(random); + + final int largeSize = 1000; + RandomByteArrayStream[] randomByteArrayValueStreams = new RandomByteArrayStream[largeSize]; + for (int i = 0; i < largeSize; i++) { + randomByteArrayValueStreams[i] = new RandomByteArrayStream(random); + int count = generateLargeCount(); + long key = randomLongKeyStream.next(); + for (int v = 0; v < count; v++) { + byte[] value = randomByteArrayValueStreams[i].next(); + map.putRow(key, value); + } + } + for (int i = 0; i < largeSize; i++) { + verifyHashMapResult(map, randomLongKeyStream.get(i), randomByteArrayValueStreams[i]); + } + } + + @Test + public void testLargeAndExpand() throws Exception { + random = new Random(46809); + + // Use a large capacity that doesn't require expansion, yet. + VectorMapJoinFastLongHashMap map = new VectorMapJoinFastLongHashMap(false, false, HashTableKeyType.LONG, MODERATE_CAPACITY, LOAD_FACTOR, MODERATE_WB_SIZE, 0); + + RandomLongStream randomLongKeyStream = new RandomLongStream(random); + + final int largeSize = 1000; + RandomByteArrayStream[] randomByteArrayValueStreams = new RandomByteArrayStream[largeSize]; + for (int i = 0; i < largeSize; i++) { + randomByteArrayValueStreams[i] = new RandomByteArrayStream(random); + int count = generateLargeCount(); + long key = randomLongKeyStream.next(); + for (int v = 0; v < count; v++) { + byte[] value = randomByteArrayValueStreams[i].next(); + map.putRow(key, value); + } + } + for (int i = 0; i < largeSize; i++) { + verifyHashMapResult(map, randomLongKeyStream.get(i), randomByteArrayValueStreams[i]); + } + } + + private void verifyHashMapResult(VectorMapJoinFastLongHashMap map, long key, + RandomByteArrayStream randomByteArrayValueStream) { + + VectorMapJoinHashMapResult hashMapResult = map.createHashMapResult(); + 
JoinUtil.JoinResult joinResult = map.lookup(key, hashMapResult); + if (joinResult != JoinUtil.JoinResult.MATCH) { + assertTrue(false); + } + + CommonFastHashTable.verifyHashMapResult(hashMapResult, randomByteArrayValueStream); + } + + private void verifyHashMapResult(VectorMapJoinFastLongHashMap map, long key, + byte[] valueBytes) { + + VectorMapJoinHashMapResult hashMapResult = map.createHashMapResult(); + JoinUtil.JoinResult joinResult = map.lookup(key, hashMapResult); + if (joinResult != JoinUtil.JoinResult.MATCH) { + assertTrue(false); + } + + CommonFastHashTable.verifyHashMapResult(hashMapResult, valueBytes); + } + +} diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/TestVectorMapJoinFastMultiKeyHashMap.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/TestVectorMapJoinFastMultiKeyHashMap.java new file mode 100644 index 0000000..3c1b29a --- /dev/null +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/TestVectorMapJoinFastMultiKeyHashMap.java @@ -0,0 +1,231 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +import java.util.Random; + +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinHashMapResult; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.VectorMapJoinFastMultiKeyHashMap; +import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKeyType; +import org.junit.Test; + +import static org.junit.Assert.*; + +public class TestVectorMapJoinFastMultiKeyHashMap extends CommonFastHashTable { + + @Test + public void testPutGetOne() throws Exception { + random = new Random(47496); + + VectorMapJoinFastMultiKeyHashMap map = + new VectorMapJoinFastMultiKeyHashMap(false, CAPACITY, LOAD_FACTOR, WB_SIZE, 0); + + RandomByteArrayStream randomByteArrayKeyStream = new RandomByteArrayStream(random); + RandomByteArrayStream randomByteArrayValueStream = new RandomByteArrayStream(random); + + byte[] key = randomByteArrayKeyStream.next(); + byte[] value = randomByteArrayValueStream.next(); + map.putRow(key, value); + verifyHashMapResult(map, key, randomByteArrayValueStream.get(0)); + + key = randomByteArrayKeyStream.next(); + value = randomByteArrayValueStream.next(); + map.putRow(key, value); + verifyHashMapResult(map, key, randomByteArrayValueStream.get(1)); + } + + @Test + public void testPutGetMultiple() throws Exception { + random = new Random(2990); + + VectorMapJoinFastMultiKeyHashMap map = new VectorMapJoinFastMultiKeyHashMap(false, CAPACITY, LOAD_FACTOR, WB_SIZE, 0); + + RandomByteArrayStream randomByteArrayKeyStream = new RandomByteArrayStream(random); + RandomByteArrayStream randomByteArrayValueStream = new RandomByteArrayStream(random); + + byte[] key = randomByteArrayKeyStream.next(); + byte[] value = randomByteArrayValueStream.next(); + map.putRow(key, value); + verifyHashMapResult(map, key, value); + + // Same key, multiple values. 
+ for (int i = 0; i < 3; ++i) { + value = randomByteArrayValueStream.next(); + map.putRow(key, value); + verifyHashMapResult(map, key, randomByteArrayValueStream); + } + } + + @Test + public void testGetNonExistent() throws Exception { + random = new Random(16916); + + VectorMapJoinFastMultiKeyHashMap map = new VectorMapJoinFastMultiKeyHashMap(false, CAPACITY, LOAD_FACTOR, WB_SIZE, 0); + + RandomByteArrayStream randomByteArrayKeyStream = new RandomByteArrayStream(random); + RandomByteArrayStream randomByteArrayValueStream = new RandomByteArrayStream(random); + + byte[] key = randomByteArrayKeyStream.next(); + byte[] value = randomByteArrayValueStream.next(); + map.putRow(key, value); + + key[0] = (byte) (key[0] + 1); + map.putRow(key, value); + + key[0] = (byte) (key[0] + 1); + VectorMapJoinHashMapResult hashMapResult = map.createHashMapResult(); + JoinUtil.JoinResult joinResult = map.lookup(key, 0, key.length, hashMapResult); + assertTrue(joinResult == JoinUtil.JoinResult.NOMATCH); + assertTrue(!hashMapResult.hasRows()); + } + + @Test + public void testPutWithFullMap() throws Exception { + random = new Random(26078); + + // Make sure the map does not expand; should be able to find space. + VectorMapJoinFastMultiKeyHashMap map = new VectorMapJoinFastMultiKeyHashMap(false, CAPACITY, 1f, WB_SIZE, 0); + + RandomByteArrayStream randomByteArrayKeyStream = new RandomByteArrayStream(random); + RandomByteArrayStream randomByteArrayValueStream = new RandomByteArrayStream(random); + for (int i = 0; i < CAPACITY; ++i) { + byte[] key = randomByteArrayKeyStream.next(); + byte[] value = randomByteArrayValueStream.next(); + map.putRow(key, value); + } + for (int i = 0; i < randomByteArrayKeyStream.size(); ++i) { + verifyHashMapResult(map, randomByteArrayKeyStream.get(i), randomByteArrayValueStream.get(i)); + } + // assertEquals(CAPACITY, map.getCapacity()); + // Get of non-existent key should terminate.. 
+ byte[] anotherKey = randomByteArrayKeyStream.next(); + VectorMapJoinHashMapResult hashMapResult = map.createHashMapResult(); + JoinUtil.JoinResult joinResult = map.lookup(anotherKey, 0, anotherKey.length, hashMapResult); + assertTrue(joinResult == JoinUtil.JoinResult.NOMATCH); + } + + @Test + public void testExpand() throws Exception { + random = new Random(22470); + + // Start with capacity 1; make sure we expand on every put. + VectorMapJoinFastMultiKeyHashMap map = new VectorMapJoinFastMultiKeyHashMap(false, 1, 0.0000001f, WB_SIZE, 0); + + RandomByteArrayStream randomByteArrayKeyStream = new RandomByteArrayStream(random); + RandomByteArrayStream randomByteArrayValueStream = new RandomByteArrayStream(random); + + for (int i = 0; i < 18; ++i) { + byte[] key = randomByteArrayKeyStream.next(); + byte[] value = randomByteArrayValueStream.next(); + map.putRow(key, value); + for (int j = 0; j <= i; ++j) { + verifyHashMapResult(map, randomByteArrayKeyStream.get(j), randomByteArrayValueStream.get(j)); + } + } + // assertEquals(1 << 18, map.getCapacity()); + } + + @Test + public void testLarge() throws Exception { + random = new Random(5231); + + // Use a large capacity that doesn't require expansion, yet. 
+ VectorMapJoinFastMultiKeyHashMap map = new VectorMapJoinFastMultiKeyHashMap(false, LARGE_CAPACITY, LOAD_FACTOR, LARGE_WB_SIZE, 0); + + RandomByteArrayStream randomByteArrayKeyStream = new RandomByteArrayStream(random, 10); + + final int largeSize = 1000; + RandomByteArrayStream[] randomByteArrayValueStreams = new RandomByteArrayStream[largeSize]; + for (int i = 0; i < largeSize; i++) { + randomByteArrayValueStreams[i] = new RandomByteArrayStream(random); + int count = generateLargeCount(); + byte[] key = randomByteArrayKeyStream.next(); + VectorMapJoinHashMapResult hashMapResult = map.createHashMapResult(); + JoinUtil.JoinResult joinResult = map.lookup(key, 0, key.length, hashMapResult); + if (joinResult == JoinUtil.JoinResult.MATCH) { + // A problem or need different random seed / longer key? + assertTrue(false); + } + for (int v = 0; v < count; v++) { + byte[] value = randomByteArrayValueStreams[i].next(); + map.putRow(key, value); + } + } + for (int i = 0; i < largeSize; i++) { + verifyHashMapResult(map, randomByteArrayKeyStream.get(i), randomByteArrayValueStreams[i]); + } + } + + @Test + public void testLargeAndExpand() throws Exception { + random = new Random(46809); + + // Use a large capacity that doesn't require expansion, yet. 
+ VectorMapJoinFastMultiKeyHashMap map = new VectorMapJoinFastMultiKeyHashMap(false, MODERATE_CAPACITY, LOAD_FACTOR, MODERATE_WB_SIZE, 0); + + RandomByteArrayStream randomByteArrayKeyStream = new RandomByteArrayStream(random, 10); + + final int largeSize = 1000; + RandomByteArrayStream[] randomByteArrayValueStreams = new RandomByteArrayStream[largeSize]; + for (int i = 0; i < largeSize; i++) { + randomByteArrayValueStreams[i] = new RandomByteArrayStream(random); + int count = generateLargeCount(); + byte[] key = randomByteArrayKeyStream.next(); + VectorMapJoinHashMapResult hashMapResult = map.createHashMapResult(); + JoinUtil.JoinResult joinResult = map.lookup(key, 0, key.length, hashMapResult); + if (joinResult == JoinUtil.JoinResult.MATCH) { + // A problem or need different random seed / longer key? + assertTrue(false); + } + for (int v = 0; v < count; v++) { + byte[] value = randomByteArrayValueStreams[i].next(); + map.putRow(key, value); + } + } + for (int i = 0; i < largeSize; i++) { + verifyHashMapResult(map, randomByteArrayKeyStream.get(i), randomByteArrayValueStreams[i]); + } + } + + private void verifyHashMapResult(VectorMapJoinFastMultiKeyHashMap map, byte[] key, + RandomByteArrayStream randomByteArrayValueStream) { + + VectorMapJoinHashMapResult hashMapResult = map.createHashMapResult(); + JoinUtil.JoinResult joinResult = map.lookup(key, 0, key.length, hashMapResult); + if (joinResult != JoinUtil.JoinResult.MATCH) { + assertTrue(false); + } + + CommonFastHashTable.verifyHashMapResult(hashMapResult, randomByteArrayValueStream); + } + + private void verifyHashMapResult(VectorMapJoinFastMultiKeyHashMap map, byte[] key, + byte[] valueBytes) { + + VectorMapJoinHashMapResult hashMapResult = map.createHashMapResult(); + JoinUtil.JoinResult joinResult = map.lookup(key, 0, key.length, hashMapResult); + if (joinResult != JoinUtil.JoinResult.MATCH) { + assertTrue(false); + } + + CommonFastHashTable.verifyHashMapResult(hashMapResult, valueBytes); + } + +} diff --git 
ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/TestVectorMapJoinRowBytesContainer.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/TestVectorMapJoinRowBytesContainer.java new file mode 100644 index 0000000..3c3aacd --- /dev/null +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/TestVectorMapJoinRowBytesContainer.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast; + +import java.util.Random; + +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinRowBytesContainer; +import org.apache.hadoop.hive.serde2.ByteStream.Output; +import org.junit.Test; + +import static org.junit.Assert.*; + +public class TestVectorMapJoinRowBytesContainer { + + public void doFillReplay(Random random, int maxCount) throws Exception { + + RandomByteArrayStream randomByteArrayStream = new RandomByteArrayStream(random); + VectorMapJoinRowBytesContainer vectorMapJoinRowBytesContainer = new VectorMapJoinRowBytesContainer(); + + int count = Math.min(maxCount, random.nextInt(500)); + for (int i = 0; i < count; i++) { + byte[] bytes = randomByteArrayStream.next(); + Output output = vectorMapJoinRowBytesContainer.getOuputForRowBytes(); + output.write(bytes); + vectorMapJoinRowBytesContainer.finishRow(); + } + vectorMapJoinRowBytesContainer.prepareForReading(); + + for (int i = 0; i < count; i++) { + if (!vectorMapJoinRowBytesContainer.readNext()) { + assertTrue(false); + } + byte[] readBytes = vectorMapJoinRowBytesContainer.currentBytes(); + int readOffset = vectorMapJoinRowBytesContainer.currentOffset(); + int readLength = vectorMapJoinRowBytesContainer.currentLength(); + byte[] expectedBytes = randomByteArrayStream.get(i); + if (readLength != expectedBytes.length) { + assertTrue(false); + } + for (int j = 0; j < readLength; j++) { + byte readByte = readBytes[readOffset + j]; + byte expectedByte = expectedBytes[j]; + if (readByte != expectedByte) { + assertTrue(false); + } + } + } + } + + @Test + public void testFillReplay() throws Exception { + Random random = new Random(47496); + + for (int i = 0; i < 10; i++) { + doFillReplay(random, 1 << i); + } + } +} \ No newline at end of file diff --git ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestVectorizer.java ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestVectorizer.java index d12c137..f9a0e79 100644 --- 
ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestVectorizer.java +++ ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestVectorizer.java @@ -109,7 +109,7 @@ public void testAggregateOnUDF() throws HiveException { Vectorizer v = new Vectorizer(); Assert.assertTrue(v.validateMapWorkOperator(gbyOp, null, false)); - VectorGroupByOperator vectorOp = (VectorGroupByOperator) v.vectorizeOperator(gbyOp, vContext); + VectorGroupByOperator vectorOp = (VectorGroupByOperator) v.vectorizeOperator(gbyOp, vContext, false); Assert.assertEquals(VectorUDAFSumLong.class, vectorOp.getAggregators()[0].getClass()); VectorUDAFSumLong udaf = (VectorUDAFSumLong) vectorOp.getAggregators()[0]; Assert.assertEquals(FuncAbsLongToLong.class, udaf.getInputExpression().getClass()); diff --git ql/src/test/queries/clientpositive/vector_aggregate_9.q ql/src/test/queries/clientpositive/vector_aggregate_9.q index e085a2d..85bcc5a 100644 --- ql/src/test/queries/clientpositive/vector_aggregate_9.q +++ ql/src/test/queries/clientpositive/vector_aggregate_9.q @@ -40,4 +40,6 @@ INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k; explain select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc; +-- SORT_QUERY_RESULTS + select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc; \ No newline at end of file diff --git ql/src/test/queries/clientpositive/vector_char_mapjoin1.q ql/src/test/queries/clientpositive/vector_char_mapjoin1.q index cb66674..76e9de8 100644 --- ql/src/test/queries/clientpositive/vector_char_mapjoin1.q +++ ql/src/test/queries/clientpositive/vector_char_mapjoin1.q @@ -36,14 +36,23 @@ create table char_join1_str_orc stored as orc as select * from char_join1_str; -- Join char with same length char explain select * from char_join1_vc1_orc a join char_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1; + +-- SORT_QUERY_RESULTS + select * from char_join1_vc1_orc a join char_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1; -- Join char with different length char 
explain select * from char_join1_vc1_orc a join char_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1; + +-- SORT_QUERY_RESULTS + select * from char_join1_vc1_orc a join char_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1; -- Join char with string explain select * from char_join1_vc1_orc a join char_join1_str_orc b on (a.c2 = b.c2) order by a.c1; + +-- SORT_QUERY_RESULTS + select * from char_join1_vc1_orc a join char_join1_str_orc b on (a.c2 = b.c2) order by a.c1; drop table char_join1_vc1; diff --git ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q index f341a11..0c07b47 100644 --- ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q +++ ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q @@ -1,21 +1,32 @@ --- SORT_QUERY_RESULTS - -CREATE TABLE decimal_mapjoin STORED AS ORC AS - SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, - CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, - cint - FROM alltypesorc; - SET hive.auto.convert.join=true; SET hive.auto.convert.join.noconditionaltask=true; SET hive.auto.convert.join.noconditionaltask.size=1000000000; SET hive.vectorized.execution.enabled=true; -EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 - FROM decimal_mapjoin l - JOIN decimal_mapjoin r ON l.cint = r.cint - WHERE l.cint = 6981; -SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 - FROM decimal_mapjoin l - JOIN decimal_mapjoin r ON l.cint = r.cint - WHERE l.cint = 6981; +CREATE TABLE over1k(t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + bo boolean, + s string, + ts timestamp, + dec decimal(4,2), + bin binary) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k; + +CREATE TABLE t1(dec decimal(4,2)) STORED AS ORC; +INSERT INTO TABLE t1 select dec from over1k; +CREATE TABLE t2(dec decimal(4,0)) STORED AS ORC; +INSERT INTO 
TABLE t2 select dec from over1k; + +explain +select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec); + +-- SORT_QUERY_RESULTS + +select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec); diff --git ql/src/test/queries/clientpositive/vector_inner_join.q ql/src/test/queries/clientpositive/vector_inner_join.q new file mode 100644 index 0000000..025b1a4 --- /dev/null +++ ql/src/test/queries/clientpositive/vector_inner_join.q @@ -0,0 +1,61 @@ +SET hive.vectorized.execution.enabled=true; +SET hive.auto.convert.join=true; + +CREATE TABLE orc_table_1a(a INT) STORED AS ORC; +CREATE TABLE orc_table_2a(c INT) STORED AS ORC; + +insert into table orc_table_1a values(1),(1), (2),(3); +insert into table orc_table_2a values(0),(2), (3),(null),(4); + +explain +select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2; + +select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2; + +explain +select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2; + +select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2; + + +CREATE TABLE orc_table_1b(v1 STRING, a INT) STORED AS ORC; +CREATE TABLE orc_table_2b(c INT, v2 STRING) STORED AS ORC; + +insert into table orc_table_1b values("one", 1),("one", 1), ("two", 2),("three", 3); +insert into table orc_table_2b values(0, "ZERO"),(2, "TWO"), (3, "THREE"),(null, ""),(4, "FOUR"); + +explain +select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2; + +select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2; + + +explain +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2; + +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2; + +explain +select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2; + 
+select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2; + +explain +select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2; + +select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2; + +explain +select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2; + +select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2; + +explain +select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2; + +select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2; + +explain +select t1.a, t1.v1, t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2; + +select t1.a, t1.v1, t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2; diff --git ql/src/test/queries/clientpositive/vector_left_outer_join.q ql/src/test/queries/clientpositive/vector_left_outer_join.q index 6e96690..7c46c53 100644 --- ql/src/test/queries/clientpositive/vector_left_outer_join.q +++ ql/src/test/queries/clientpositive/vector_left_outer_join.q @@ -1,5 +1,6 @@ set hive.vectorized.execution.enabled=true; set hive.auto.convert.join=true; +set hive.mapjoin.hybridgrace.hashtable=false; explain select count(*) from (select c.ctinyint from alltypesorc c diff --git ql/src/test/queries/clientpositive/vector_outer_join0.q ql/src/test/queries/clientpositive/vector_outer_join0.q new file mode 100644 index 0000000..95bdc41 --- /dev/null +++ ql/src/test/queries/clientpositive/vector_outer_join0.q @@ -0,0 +1,25 @@ +SET hive.vectorized.execution.enabled=true; +SET hive.auto.convert.join=true; + +CREATE TABLE orc_table_1(v1 STRING, a INT) STORED AS ORC; +CREATE TABLE orc_table_2(c INT, v2 STRING) STORED AS ORC; + +insert into table orc_table_1 values ("", null),("one", 1),("one", 1),("two", 
2),("three", 3),("", null); +insert into table orc_table_2 values (0, "ZERO"),(2, "TWO"), (3, "THREE"),(null, ""),(4, "FOUR"),(null, ""); + +select * from orc_table_1; +select * from orc_table_2; + +explain +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c; + +-- SORT_QUERY_RESULTS + +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c; + +explain +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c; + +-- SORT_QUERY_RESULTS + +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c; \ No newline at end of file diff --git ql/src/test/queries/clientpositive/vector_outer_join1.q ql/src/test/queries/clientpositive/vector_outer_join1.q new file mode 100644 index 0000000..a352cd7 --- /dev/null +++ ql/src/test/queries/clientpositive/vector_outer_join1.q @@ -0,0 +1,64 @@ +SET hive.vectorized.execution.enabled=true; +SET hive.auto.convert.join=true; + +create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null limit 5; +create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null limit 5; +create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null limit 5; +create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null limit 5; + +create table small_alltypesorc_a stored as orc as select * from +(select * from (select * from small_alltypesorc1a) sq1 + union all + select * from (select * from small_alltypesorc2a) sq2 + union all + select * from (select * from small_alltypesorc3a) sq3 + union all + select * from (select * from small_alltypesorc4a) sq4) q; + +ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS; +ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS; + +explain +select * +from 
small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint; + +-- SORT_QUERY_RESULTS + +select * +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint; + +explain +select c.ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint; + +-- SORT_QUERY_RESULTS + +select c.ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint; + +explain +select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +) t1; + +-- SORT_QUERY_RESULTS + +select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +) t1; \ No newline at end of file diff --git ql/src/test/queries/clientpositive/vector_outer_join2.q ql/src/test/queries/clientpositive/vector_outer_join2.q new file mode 100644 index 0000000..76aa2fd --- /dev/null +++ ql/src/test/queries/clientpositive/vector_outer_join2.q @@ -0,0 +1,39 @@ +SET hive.vectorized.execution.enabled=true; +SET hive.auto.convert.join=true; +SET hive.vectorized.execution.mapjoin.native.enabled=true; + +create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null limit 5; +create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null limit 5; +create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null limit 5; +create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null limit 5; + +create table small_alltypesorc_a stored as orc as select * from +(select * from (select * from 
small_alltypesorc1a) sq1 + union all + select * from (select * from small_alltypesorc2a) sq2 + union all + select * from (select * from small_alltypesorc3a) sq3 + union all + select * from (select * from small_alltypesorc4a) sq4) q; + +ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS; +ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS; + +explain +select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cbigint = c.cbigint +) t1; + +-- SORT_QUERY_RESULTS + +select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cbigint = c.cbigint +) t1; \ No newline at end of file diff --git ql/src/test/queries/clientpositive/vector_outer_join3.q ql/src/test/queries/clientpositive/vector_outer_join3.q new file mode 100644 index 0000000..81161cf --- /dev/null +++ ql/src/test/queries/clientpositive/vector_outer_join3.q @@ -0,0 +1,80 @@ +SET hive.vectorized.execution.enabled=true; +SET hive.auto.convert.join=true; +SET hive.vectorized.execution.mapjoin.native.enabled=true; + +create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null limit 5; +create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null limit 5; +create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null limit 5; +create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null limit 5; + +create table small_alltypesorc_a stored as orc as select * from +(select * from (select * from small_alltypesorc1a) sq1 + union all + select * from (select * from small_alltypesorc2a) sq2 + union all + select * from (select * from 
small_alltypesorc3a) sq3 + union all + select * from (select * from small_alltypesorc4a) sq4) q; + +ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS; +ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS; + +explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +; + +-- SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1; + +explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +; + +-- SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1; + +explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 and hd.cint = c.cint +) t1 +; + +-- SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 and hd.cint = c.cint +) t1; \ No newline at end of file diff --git ql/src/test/queries/clientpositive/vector_outer_join4.q ql/src/test/queries/clientpositive/vector_outer_join4.q new file mode 100644 index 0000000..fb9e6e4 --- /dev/null +++ ql/src/test/queries/clientpositive/vector_outer_join4.q @@ -0,0 +1,66 
@@ +SET hive.vectorized.execution.enabled=true; +SET hive.auto.convert.join=true; +SET hive.vectorized.execution.mapjoin.native.enabled=true; + +create table small_alltypesorc1b as select * from alltypesorc where cint is not null and ctinyint is not null limit 10; +create table small_alltypesorc2b as select * from alltypesorc where cint is null and ctinyint is not null limit 10; +create table small_alltypesorc3b as select * from alltypesorc where cint is not null and ctinyint is null limit 10; +create table small_alltypesorc4b as select * from alltypesorc where cint is null and ctinyint is null limit 10; + +create table small_alltypesorc_b stored as orc as select * from +(select * from (select * from small_alltypesorc1b) sq1 + union all + select * from (select * from small_alltypesorc2b) sq2 + union all + select * from (select * from small_alltypesorc3b) sq3 + union all + select * from (select * from small_alltypesorc4b) sq4) q; + +ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS; +ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS FOR COLUMNS; + +explain +select * +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint; + +-- SORT_QUERY_RESULTS + +select * +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint; + +explain +select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint; + +-- SORT_QUERY_RESULTS + +select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint; + +explain +select count(*) from (select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +) t1 +; + +-- SORT_QUERY_RESULTS + +select count(*) from (select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +left outer join small_alltypesorc_b hd 
+ on hd.ctinyint = c.ctinyint +) t1; \ No newline at end of file diff --git ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out index 5ec95c2..25020dd 100644 --- ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out +++ ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out @@ -1,34 +1,82 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE decimal_mapjoin STORED AS ORC AS - SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, - CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, - cint - FROM alltypesorc -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@alltypesorc +PREHOOK: query: CREATE TABLE over1k(t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + bo boolean, + s string, + ts timestamp, + dec decimal(4,2), + bin binary) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@decimal_mapjoin -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE decimal_mapjoin STORED AS ORC AS - SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, - CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, - cint - FROM alltypesorc -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@alltypesorc +PREHOOK: Output: default@over1k +POSTHOOK: query: CREATE TABLE over1k(t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + bo boolean, + s string, + ts timestamp, + dec decimal(4,2), + bin binary) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@over1k +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@over1k +POSTHOOK: 
query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@over1k +PREHOOK: query: CREATE TABLE t1(dec decimal(4,2)) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t1 +POSTHOOK: query: CREATE TABLE t1(dec decimal(4,2)) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t1 +PREHOOK: query: INSERT INTO TABLE t1 select dec from over1k +PREHOOK: type: QUERY +PREHOOK: Input: default@over1k +PREHOOK: Output: default@t1 +POSTHOOK: query: INSERT INTO TABLE t1 select dec from over1k +POSTHOOK: type: QUERY +POSTHOOK: Input: default@over1k +POSTHOOK: Output: default@t1 +POSTHOOK: Lineage: t1.dec SIMPLE [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ] +PREHOOK: query: CREATE TABLE t2(dec decimal(4,0)) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t2 +POSTHOOK: query: CREATE TABLE t2(dec decimal(4,0)) STORED AS ORC +POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@decimal_mapjoin -PREHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 - FROM decimal_mapjoin l - JOIN decimal_mapjoin r ON l.cint = r.cint - WHERE l.cint = 6981 +POSTHOOK: Output: default@t2 +PREHOOK: query: INSERT INTO TABLE t2 select dec from over1k +PREHOOK: type: QUERY +PREHOOK: Input: default@over1k +PREHOOK: Output: default@t2 +POSTHOOK: query: INSERT INTO TABLE t2 select dec from over1k +POSTHOOK: type: QUERY +POSTHOOK: Input: default@over1k +POSTHOOK: Output: default@t2 +POSTHOOK: Lineage: t2.dec EXPRESSION [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ] +PREHOOK: query: explain +select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, 
r.cdecimal2 - FROM decimal_mapjoin l - JOIN decimal_mapjoin r ON l.cint = r.cint - WHERE l.cint = 6981 +POSTHOOK: query: explain +select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -43,15 +91,15 @@ STAGE PLANS: Map 2 Map Operator Tree: TableScan - alias: r - Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE + alias: t2 + Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (cint = 6981) (type: boolean) - Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE + predicate: dec is not null (type: boolean) + Statistics: Num rows: 525 Data size: 58800 Basic stats: COMPLETE Column stats: NONE Spark HashTable Sink Operator keys: - 0 6981 (type: int) - 1 6981 (type: int) + 0 dec (type: decimal(6,2)) + 1 dec (type: decimal(6,0)) Local Work: Map Reduce Local Work @@ -62,28 +110,28 @@ STAGE PLANS: Map 1 Map Operator Tree: TableScan - alias: l - Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE + alias: t1 + Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (cint = 6981) (type: boolean) - Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE + predicate: dec is not null (type: boolean) + Statistics: Num rows: 525 Data size: 58800 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 keys: - 0 6981 (type: int) - 1 6981 (type: int) - outputColumnNames: _col1, _col9 + 0 dec (type: decimal(6,2)) + 1 dec (type: decimal(6,0)) + outputColumnNames: _col0, _col4 input vertices: 1 Map 2 - Statistics: Num rows: 6758 Data size: 1190783 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 577 Data size: 64680 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 6981 (type: int), 6981 
(type: int), _col1 (type: decimal(20,10)), _col9 (type: decimal(23,14)) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 6758 Data size: 1190783 Basic stats: COMPLETE Column stats: NONE + expressions: _col0 (type: decimal(4,2)), _col4 (type: decimal(4,0)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 577 Data size: 64680 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 6758 Data size: 1190783 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 577 Data size: 64680 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -98,117 +146,123 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 - FROM decimal_mapjoin l - JOIN decimal_mapjoin r ON l.cint = r.cint - WHERE l.cint = 6981 +PREHOOK: query: -- SORT_QUERY_RESULTS + +select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_mapjoin +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 #### A masked pattern was here #### -POSTHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 - FROM decimal_mapjoin l - JOIN decimal_mapjoin r ON l.cint = r.cint - WHERE l.cint = 6981 +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_mapjoin +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 #### A masked pattern was here #### -6981 6981 -515.621072973 -617.5607769230769 -6981 6981 -515.621072973 -617.5607769230769 -6981 6981 -515.621072973 -617.5607769230769 -6981 6981 -515.621072973 -617.5607769230769 -6981 6981 -515.621072973 6984454.211097692 -6981 6981 -515.621072973 6984454.211097692 -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL 
-6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 5831542.269248378 -617.5607769230769 -6981 6981 5831542.269248378 -617.5607769230769 -6981 6981 5831542.269248378 6984454.211097692 -6981 6981 5831542.269248378 NULL -6981 6981 5831542.269248378 NULL -6981 6981 5831542.269248378 NULL -6981 6981 5831542.269248378 NULL -6981 6981 5831542.269248378 NULL -6981 6981 5831542.269248378 NULL -6981 6981 5831542.269248378 NULL -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL 
-6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +45 45 +45 45 +45 45 +45 45 +45 45 +6 6 +6 6 +6 6 +6 6 +6 6 +6 6 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +70 70 +70 70 +70 70 +70 70 +70 70 +70 70 +70 70 +79 79 +79 79 +79 79 +79 79 +79 79 +79 79 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 diff --git ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out index 3f8a271..382380e 100644 --- ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out +++ ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out @@ -158,11 +158,15 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc +PREHOOK: query: -- SORT_QUERY_RESULTS + +select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc PREHOOK: type: QUERY PREHOOK: Input: default@vectortab2korc #### A masked pattern was here #### -POSTHOOK: query: select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc 
POSTHOOK: type: QUERY POSTHOOK: Input: default@vectortab2korc #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/tez/vector_char_mapjoin1.q.out ql/src/test/results/clientpositive/tez/vector_char_mapjoin1.q.out index b014699..f495f95 100644 --- ql/src/test/results/clientpositive/tez/vector_char_mapjoin1.q.out +++ ql/src/test/results/clientpositive/tez/vector_char_mapjoin1.q.out @@ -201,18 +201,22 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from char_join1_vc1_orc a join char_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1 +PREHOOK: query: -- SORT_QUERY_RESULTS + +select * from char_join1_vc1_orc a join char_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1 PREHOOK: type: QUERY PREHOOK: Input: default@char_join1_vc1_orc #### A masked pattern was here #### -POSTHOOK: query: select * from char_join1_vc1_orc a join char_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1 +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select * from char_join1_vc1_orc a join char_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1 POSTHOOK: type: QUERY POSTHOOK: Input: default@char_join1_vc1_orc #### A masked pattern was here #### -1 abc 2 abc 1 abc 1 abc -2 abc 2 abc +1 abc 2 abc 2 abc 1 abc +2 abc 2 abc 3 abc 3 abc PREHOOK: query: -- Join char with different length char explain select * from char_join1_vc1_orc a join char_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1 @@ -297,20 +301,24 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from char_join1_vc1_orc a join char_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1 +PREHOOK: query: -- SORT_QUERY_RESULTS + +select * from char_join1_vc1_orc a join char_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1 PREHOOK: type: QUERY PREHOOK: Input: default@char_join1_vc1_orc PREHOOK: Input: default@char_join1_vc2_orc #### A masked pattern was here #### -POSTHOOK: query: select * from char_join1_vc1_orc a join char_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1 +POSTHOOK: query: -- 
SORT_QUERY_RESULTS + +select * from char_join1_vc1_orc a join char_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1 POSTHOOK: type: QUERY POSTHOOK: Input: default@char_join1_vc1_orc POSTHOOK: Input: default@char_join1_vc2_orc #### A masked pattern was here #### -1 abc 2 abc 1 abc 1 abc -2 abc 2 abc +1 abc 2 abc 2 abc 1 abc +2 abc 2 abc 3 abc 3 abc PREHOOK: query: -- Join char with string explain select * from char_join1_vc1_orc a join char_join1_str_orc b on (a.c2 = b.c2) order by a.c1 @@ -394,12 +402,16 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from char_join1_vc1_orc a join char_join1_str_orc b on (a.c2 = b.c2) order by a.c1 +PREHOOK: query: -- SORT_QUERY_RESULTS + +select * from char_join1_vc1_orc a join char_join1_str_orc b on (a.c2 = b.c2) order by a.c1 PREHOOK: type: QUERY PREHOOK: Input: default@char_join1_str_orc PREHOOK: Input: default@char_join1_vc1_orc #### A masked pattern was here #### -POSTHOOK: query: select * from char_join1_vc1_orc a join char_join1_str_orc b on (a.c2 = b.c2) order by a.c1 +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select * from char_join1_vc1_orc a join char_join1_str_orc b on (a.c2 = b.c2) order by a.c1 POSTHOOK: type: QUERY POSTHOOK: Input: default@char_join1_str_orc POSTHOOK: Input: default@char_join1_vc1_orc diff --git ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out index f418ae0..240b875 100644 --- ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out +++ ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out @@ -1,34 +1,82 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE decimal_mapjoin STORED AS ORC AS - SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, - CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, - cint - FROM alltypesorc -PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@alltypesorc +PREHOOK: query: CREATE TABLE over1k(t 
tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + bo boolean, + s string, + ts timestamp, + dec decimal(4,2), + bin binary) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@decimal_mapjoin -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE decimal_mapjoin STORED AS ORC AS - SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, - CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, - cint - FROM alltypesorc -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@alltypesorc +PREHOOK: Output: default@over1k +POSTHOOK: query: CREATE TABLE over1k(t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + bo boolean, + s string, + ts timestamp, + dec decimal(4,2), + bin binary) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@over1k +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@over1k +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@over1k +PREHOOK: query: CREATE TABLE t1(dec decimal(4,2)) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t1 +POSTHOOK: query: CREATE TABLE t1(dec decimal(4,2)) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t1 +PREHOOK: query: INSERT INTO TABLE t1 select dec from over1k +PREHOOK: type: QUERY +PREHOOK: Input: default@over1k +PREHOOK: Output: default@t1 +POSTHOOK: query: INSERT INTO TABLE t1 select dec from over1k +POSTHOOK: type: QUERY +POSTHOOK: Input: default@over1k +POSTHOOK: Output: 
default@t1 +POSTHOOK: Lineage: t1.dec SIMPLE [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ] +PREHOOK: query: CREATE TABLE t2(dec decimal(4,0)) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t2 +POSTHOOK: query: CREATE TABLE t2(dec decimal(4,0)) STORED AS ORC +POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@decimal_mapjoin -PREHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 - FROM decimal_mapjoin l - JOIN decimal_mapjoin r ON l.cint = r.cint - WHERE l.cint = 6981 +POSTHOOK: Output: default@t2 +PREHOOK: query: INSERT INTO TABLE t2 select dec from over1k +PREHOOK: type: QUERY +PREHOOK: Input: default@over1k +PREHOOK: Output: default@t2 +POSTHOOK: query: INSERT INTO TABLE t2 select dec from over1k +POSTHOOK: type: QUERY +POSTHOOK: Input: default@over1k +POSTHOOK: Output: default@t2 +POSTHOOK: Lineage: t2.dec EXPRESSION [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ] +PREHOOK: query: explain +select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 - FROM decimal_mapjoin l - JOIN decimal_mapjoin r ON l.cint = r.cint - WHERE l.cint = 6981 +POSTHOOK: query: explain +select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -44,29 +92,29 @@ STAGE PLANS: Map 1 Map Operator Tree: TableScan - alias: l - Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE + alias: t1 + Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (cint = 6981) (type: boolean) - Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE + predicate: dec is not null (type: boolean) + Statistics: Num rows: 525 Data size: 58800 Basic stats: COMPLETE 
Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 keys: - 0 6981 (type: int) - 1 6981 (type: int) - outputColumnNames: _col1, _col9 + 0 dec (type: decimal(6,2)) + 1 dec (type: decimal(6,2)) + outputColumnNames: _col0, _col4 input vertices: 1 Map 2 - Statistics: Num rows: 6758 Data size: 1190783 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 577 Data size: 64680 Basic stats: COMPLETE Column stats: NONE HybridGraceHashJoin: true Select Operator - expressions: 6981 (type: int), 6981 (type: int), _col1 (type: decimal(20,10)), _col9 (type: decimal(23,14)) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 6758 Data size: 1190783 Basic stats: COMPLETE Column stats: NONE + expressions: _col0 (type: decimal(4,2)), _col4 (type: decimal(4,0)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 577 Data size: 64680 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 6758 Data size: 1190783 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 577 Data size: 64680 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -75,17 +123,17 @@ STAGE PLANS: Map 2 Map Operator Tree: TableScan - alias: r - Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE + alias: t2 + Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (cint = 6981) (type: boolean) - Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE + predicate: dec is not null (type: boolean) + Statistics: Num rows: 525 Data size: 58800 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: 6981 (type: int) + key expressions: dec (type: decimal(6,2)) sort order: + - Map-reduce partition columns: 6981 (type: int) - 
Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE - value expressions: cdecimal2 (type: decimal(23,14)) + Map-reduce partition columns: dec (type: decimal(6,2)) + Statistics: Num rows: 525 Data size: 58800 Basic stats: COMPLETE Column stats: NONE + value expressions: dec (type: decimal(4,0)) Execution mode: vectorized Stage: Stage-0 @@ -94,117 +142,123 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 - FROM decimal_mapjoin l - JOIN decimal_mapjoin r ON l.cint = r.cint - WHERE l.cint = 6981 +PREHOOK: query: -- SORT_QUERY_RESULTS + +select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_mapjoin +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 #### A masked pattern was here #### -POSTHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 - FROM decimal_mapjoin l - JOIN decimal_mapjoin r ON l.cint = r.cint - WHERE l.cint = 6981 +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_mapjoin +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 #### A masked pattern was here #### -6981 6981 -515.621072973 -617.5607769230769 -6981 6981 -515.621072973 -617.5607769230769 -6981 6981 -515.621072973 -617.5607769230769 -6981 6981 -515.621072973 -617.5607769230769 -6981 6981 -515.621072973 6984454.211097692 -6981 6981 -515.621072973 6984454.211097692 -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 5831542.269248378 
-617.5607769230769 -6981 6981 5831542.269248378 -617.5607769230769 -6981 6981 5831542.269248378 6984454.211097692 -6981 6981 5831542.269248378 NULL -6981 6981 5831542.269248378 NULL -6981 6981 5831542.269248378 NULL -6981 6981 5831542.269248378 NULL -6981 6981 5831542.269248378 NULL -6981 6981 5831542.269248378 NULL -6981 6981 5831542.269248378 NULL -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL 
NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +45 45 +45 45 +45 45 +45 45 +45 45 +6 6 +6 6 +6 6 +6 6 +6 6 +6 6 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +70 70 +70 70 +70 70 +70 70 +70 70 +70 70 +70 70 +79 79 +79 79 +79 79 +79 79 +79 79 +79 79 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 diff --git ql/src/test/results/clientpositive/tez/vector_inner_join.q.out ql/src/test/results/clientpositive/tez/vector_inner_join.q.out new file mode 100644 index 0000000..af80260 --- /dev/null +++ ql/src/test/results/clientpositive/tez/vector_inner_join.q.out @@ -0,0 +1,806 @@ +PREHOOK: query: CREATE TABLE orc_table_1a(a INT) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_table_1a +POSTHOOK: query: CREATE TABLE orc_table_1a(a INT) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_table_1a +PREHOOK: query: CREATE TABLE orc_table_2a(c INT) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_table_2a +POSTHOOK: query: CREATE TABLE orc_table_2a(c INT) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_table_2a +PREHOOK: query: insert into table orc_table_1a values(1),(1), (2),(3) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@orc_table_1a +POSTHOOK: query: insert into table orc_table_1a values(1),(1), 
(2),(3) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: default@orc_table_1a +POSTHOOK: Lineage: orc_table_1a.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: insert into table orc_table_2a values(0),(2), (3),(null),(4) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@orc_table_2a +POSTHOOK: query: insert into table orc_table_2a values(0),(2), (3),(null),(4) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@orc_table_2a +POSTHOOK: Lineage: orc_table_2a.c EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: explain +select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 5 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (c > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: c (type: int) + sort order: + + Map-reduce partition columns: c (type: int) + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: t1 + Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a > 2) (type: boolean) + Statistics: Num rows: 1 Data 
size: 4 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 c (type: int) + 1 a (type: int) + outputColumnNames: _col4 + input vertices: + 0 Map 1 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Select Operator + expressions: _col4 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1a +PREHOOK: Input: default@orc_table_2a +#### A masked pattern was here #### +POSTHOOK: query: select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1a +POSTHOOK: Input: default@orc_table_2a +#### A masked pattern was here #### +3 +PREHOOK: query: explain +select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 5 Data size: 16 Basic stats: COMPLETE 
Column stats: NONE + Filter Operator + predicate: (c > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: c (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Semi Join 0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0 + input vertices: + 1 Map 2 + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: t1 + Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: a is not null (type: boolean) + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: a (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: int) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2 +PREHOOK: type: QUERY +PREHOOK: Input: 
default@orc_table_1a +PREHOOK: Input: default@orc_table_2a +#### A masked pattern was here #### +POSTHOOK: query: select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1a +POSTHOOK: Input: default@orc_table_2a +#### A masked pattern was here #### +3 +PREHOOK: query: CREATE TABLE orc_table_1b(v1 STRING, a INT) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_table_1b +POSTHOOK: query: CREATE TABLE orc_table_1b(v1 STRING, a INT) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_table_1b +PREHOOK: query: CREATE TABLE orc_table_2b(c INT, v2 STRING) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_table_2b +POSTHOOK: query: CREATE TABLE orc_table_2b(c INT, v2 STRING) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_table_2b +PREHOOK: query: insert into table orc_table_1b values("one", 1),("one", 1), ("two", 2),("three", 3) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__3 +PREHOOK: Output: default@orc_table_1b +POSTHOOK: query: insert into table orc_table_1b values("one", 1),("one", 1), ("two", 2),("three", 3) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__3 +POSTHOOK: Output: default@orc_table_1b +POSTHOOK: Lineage: orc_table_1b.a EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +POSTHOOK: Lineage: orc_table_1b.v1 SIMPLE [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: insert into table orc_table_2b values(0, "ZERO"),(2, "TWO"), (3, "THREE"),(null, ""),(4, "FOUR") +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__4 +PREHOOK: Output: 
default@orc_table_2b +POSTHOOK: query: insert into table orc_table_2b values(0, "ZERO"),(2, "TWO"), (3, "THREE"),(null, ""),(4, "FOUR") +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__4 +POSTHOOK: Output: default@orc_table_2b +POSTHOOK: Lineage: orc_table_2b.c EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: orc_table_2b.v2 SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: explain +select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (c > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 c (type: int) + 1 a (type: int) + outputColumnNames: _col5, _col6 + input vertices: + 1 Map 2 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Select Operator + expressions: _col5 (type: string), _col6 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: t1 + Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: a (type: int) + sort order: + + Map-reduce partition columns: a (type: int) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + value expressions: v1 (type: string) + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1b +PREHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +POSTHOOK: query: select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1b +POSTHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +three 3 +PREHOOK: query: explain +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (c > 2) (type: boolean) + Statistics: Num 
rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 c (type: int) + 1 a (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + input vertices: + 1 Map 2 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Select Operator + expressions: _col5 (type: string), _col6 (type: int), _col0 (type: int), _col1 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: t1 + Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: a (type: int) + sort order: + + Map-reduce partition columns: a (type: int) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + value expressions: v1 (type: string) + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1b +PREHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +POSTHOOK: query: select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: 
default@orc_table_1b +POSTHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +three 3 3 THREE +PREHOOK: query: explain +select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (c > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 c (type: int) + 1 a (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + input vertices: + 1 Map 2 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Select Operator + expressions: _col5 (type: string), (_col6 * 2) (type: int), (_col0 * 5) (type: int), _col1 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: t1 + Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a > 2) (type: 
boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: a (type: int) + sort order: + + Map-reduce partition columns: a (type: int) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + value expressions: v1 (type: string) + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1b +PREHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +POSTHOOK: query: select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1b +POSTHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +three 6 15 THREE +PREHOOK: query: explain +select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (c > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 c (type: int) + 1 a (type: int) + outputColumnNames: _col0, _col1, _col5 + input vertices: + 1 Map 2 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column 
stats: NONE + HybridGraceHashJoin: true + Select Operator + expressions: _col5 (type: string), _col1 (type: string), _col0 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: t1 + Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: a (type: int) + sort order: + + Map-reduce partition columns: a (type: int) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + value expressions: v1 (type: string) + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1b +PREHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +POSTHOOK: query: select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1b +POSTHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +three THREE 3 +PREHOOK: query: explain +select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on 
t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (c > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 c (type: int) + 1 a (type: int) + outputColumnNames: _col1, _col5, _col6 + input vertices: + 1 Map 2 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Select Operator + expressions: _col6 (type: int), _col5 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: t1 + Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: a (type: int) + sort order: + + Map-reduce partition columns: a (type: int) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + value expressions: v1 (type: string) + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + 
ListSink + +PREHOOK: query: select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1b +PREHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +POSTHOOK: query: select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1b +POSTHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +3 three THREE +PREHOOK: query: explain +select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: t1 + Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 a (type: int) + 1 c (type: int) + outputColumnNames: _col0, _col5, _col6 + input vertices: + 1 Map 2 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Select Operator + expressions: _col0 (type: string), _col6 (type: string), _col5 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat 
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (c > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: c (type: int) + sort order: + + Map-reduce partition columns: c (type: int) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + value expressions: v2 (type: string) + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1b +PREHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +POSTHOOK: query: select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1b +POSTHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +three THREE 3 +PREHOOK: query: explain +select t1.a, t1.v1, t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.a, t1.v1, t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: t1 + Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a > 2) (type: 
boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 a (type: int) + 1 c (type: int) + outputColumnNames: _col0, _col1, _col6 + input vertices: + 1 Map 2 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Select Operator + expressions: _col1 (type: int), _col0 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (c > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: c (type: int) + sort order: + + Map-reduce partition columns: c (type: int) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + value expressions: v2 (type: string) + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t1.a, t1.v1, t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1b +PREHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +POSTHOOK: query: select t1.a, t1.v1, t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1b 
+POSTHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +3 three THREE diff --git ql/src/test/results/clientpositive/tez/vector_left_outer_join.q.out ql/src/test/results/clientpositive/tez/vector_left_outer_join.q.out index 5fc05ea..ce722e8 100644 --- ql/src/test/results/clientpositive/tez/vector_left_outer_join.q.out +++ ql/src/test/results/clientpositive/tez/vector_left_outer_join.q.out @@ -47,7 +47,6 @@ STAGE PLANS: input vertices: 1 Map 3 Statistics: Num rows: 13516 Data size: 2906160 Basic stats: COMPLETE Column stats: NONE - HybridGraceHashJoin: true Map Join Operator condition map: Left Outer Join0 to 1 @@ -57,7 +56,6 @@ STAGE PLANS: input vertices: 1 Map 4 Statistics: Num rows: 14867 Data size: 3196776 Basic stats: COMPLETE Column stats: NONE - HybridGraceHashJoin: true Group By Operator aggregations: count() mode: hash diff --git ql/src/test/results/clientpositive/tez/vector_outer_join.q.out ql/src/test/results/clientpositive/tez/vector_outer_join.q.out new file mode 100644 index 0000000..9691c48 --- /dev/null +++ ql/src/test/results/clientpositive/tez/vector_outer_join.q.out @@ -0,0 +1,2204 @@ +PREHOOK: query: CREATE TABLE orc_table_1(v1 STRING, a INT) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_table_1 +POSTHOOK: query: CREATE TABLE orc_table_1(v1 STRING, a INT) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_table_1 +PREHOOK: query: CREATE TABLE orc_table_2(c INT, v2 STRING) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_table_2 +POSTHOOK: query: CREATE TABLE orc_table_2(c INT, v2 STRING) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_table_2 +PREHOOK: query: insert into table orc_table_1 values ("", null),("one", 1),("one", 1),("two", 2),("three", 3),("", null) +PREHOOK: type: QUERY 
+PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@orc_table_1 +POSTHOOK: query: insert into table orc_table_1 values ("", null),("one", 1),("one", 1),("two", 2),("three", 3),("", null) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: default@orc_table_1 +POSTHOOK: Lineage: orc_table_1.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +POSTHOOK: Lineage: orc_table_1.v1 SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: insert into table orc_table_2 values (0, "ZERO"),(2, "TWO"), (3, "THREE"),(null, ""),(4, "FOUR"),(null, "") +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@orc_table_2 +POSTHOOK: query: insert into table orc_table_2 values (0, "ZERO"),(2, "TWO"), (3, "THREE"),(null, ""),(4, "FOUR"),(null, "") +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@orc_table_2 +POSTHOOK: Lineage: orc_table_2.c EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: orc_table_2.v2 SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: select * from orc_table_1 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1 +#### A masked pattern was here #### +POSTHOOK: query: select * from orc_table_1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1 +#### A masked pattern was here #### + NULL + NULL +one 1 +one 1 +three 3 +two 2 +PREHOOK: query: select * from orc_table_2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_2 +#### A masked pattern was here #### +POSTHOOK: query: select * from orc_table_2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_2 +#### A masked pattern was here #### +0 ZERO 
+2 TWO +3 THREE +4 FOUR +NULL +NULL +PREHOOK: query: explain +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: t1 + Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 a (type: int) + 1 c (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + input vertices: + 1 Map 2 + Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: int), _col5 (type: int), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: c (type: int) + sort order: + + Map-reduce partition columns: c (type: int) + Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE + value expressions: v2 (type: string) + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + 
Processor Tree: + ListSink + +PREHOOK: query: select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1 +PREHOOK: Input: default@orc_table_2 +#### A masked pattern was here #### +POSTHOOK: query: select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1 +POSTHOOK: Input: default@orc_table_2 +#### A masked pattern was here #### + NULL NULL NULL + NULL NULL NULL +one 1 NULL NULL +one 1 NULL NULL +three 3 3 THREE +two 2 2 TWO +PREHOOK: query: explain +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: t1 + Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: a (type: int) + sort order: + + Map-reduce partition columns: a (type: int) + Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE + value expressions: v1 (type: string) + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Right Outer Join0 to 1 + keys: + 0 a (type: int) + 1 c (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + input vertices: + 0 Map 1 + Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), 
_col1 (type: int), _col5 (type: int), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1 +PREHOOK: Input: default@orc_table_2 +#### A masked pattern was here #### +POSTHOOK: query: select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1 +POSTHOOK: Input: default@orc_table_2 +#### A masked pattern was here #### +NULL NULL 0 ZERO +NULL NULL 4 FOUR +NULL NULL NULL +NULL NULL NULL +three 3 3 THREE +two 2 2 TWO +PREHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc1a +POSTHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc1a +PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: 
default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc2a +POSTHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc2a +PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc3a +POSTHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc3a +PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc4a +POSTHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc4a +PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from +(select * from (select * from small_alltypesorc1a) sq1 + union all + select * from (select * from small_alltypesorc2a) sq2 + union all + select * from (select * from small_alltypesorc3a) sq3 + union all + select * from (select * from small_alltypesorc4a) sq4) q +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@small_alltypesorc1a +PREHOOK: Input: 
default@small_alltypesorc2a +PREHOOK: Input: default@small_alltypesorc3a +PREHOOK: Input: default@small_alltypesorc4a +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc_a +POSTHOOK: query: create table small_alltypesorc_a stored as orc as select * from +(select * from (select * from small_alltypesorc1a) sq1 + union all + select * from (select * from small_alltypesorc2a) sq2 + union all + select * from (select * from small_alltypesorc3a) sq3 + union all + select * from (select * from small_alltypesorc4a) sq4) q +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@small_alltypesorc1a +POSTHOOK: Input: default@small_alltypesorc2a +POSTHOOK: Input: default@small_alltypesorc3a +POSTHOOK: Input: default@small_alltypesorc4a +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc_a +PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +PREHOOK: Output: default@small_alltypesorc_a +POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +POSTHOOK: Output: default@small_alltypesorc_a +PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +PREHOOK: query: explain +select * +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: 
Stage-1 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 11 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 + Statistics: Num rows: 11 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col2 (type: int) + 1 _col2 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23 + input vertices: + 1 Map 2 + Statistics: Num rows: 12 Data size: 4021 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 12 Data size: 4021 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 11 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: 
boolean), cboolean2 (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 + Statistics: Num rows: 11 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col2 (type: int) + sort order: + + Map-reduce partition columns: _col2 (type: int) + Statistics: Num rows: 11 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean) + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select * +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select * +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +-21 -200 NULL NULL -21.0 -200.0 NULL NULL 1969-12-31 16:00:09.052 1969-12-31 15:59:55.451 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +29 7021 528534767 NULL 29.0 7021.0 
cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -50 -13326 528534767 NULL -50.0 
-13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +29 7021 528534767 NULL 29.0 7021.0 
cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 
15:59:55.407 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +29 7021 528534767 NULL 29.0 
7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 
1969-12-31 16:00:15.148 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +3 -200 NULL -1438142492 3.0 -200.0 NULL T8Uakh8tudd1XRG5yKW8Y42H 1969-12-31 16:00:07.648 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +31 -200 NULL -245476731 31.0 -200.0 NULL 3E3BxP 1969-12-31 15:59:54.739 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +38 -200 NULL -1090414113 38.0 -200.0 NULL 5oQ43l 1969-12-31 16:00:05.478 1969-12-31 15:59:55.451 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +38 -200 NULL 1576772382 38.0 -200.0 NULL X3Ufbt46AUIfHe 1969-12-31 15:59:49.567 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 
16:00:13.839 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 
16:00:13.839 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 31 -9566 
528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL 
+NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 
true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 
16:00:13.839 true NULL +PREHOOK: query: explain +select c.ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +PREHOOK: type: QUERY +POSTHOOK: query: explain +select c.ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: tinyint) + 1 _col0 (type: tinyint) + outputColumnNames: _col0 + input vertices: + 1 Map 2 + Statistics: Num rows: 1005 Data size: 4021 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1005 Data size: 4021 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Map-reduce partition columns: _col0 (type: tinyint) + Statistics: Num rows: 914 Data size: 3656 Basic stats: 
COMPLETE Column stats: NONE + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select c.ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select c.ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +-21 +-28 +-34 +-50 +29 +3 +31 +31 +31 +31 +38 +38 +38 +38 +NULL +NULL +NULL +NULL +NULL +PREHOOK: query: explain +select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +) t1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +) t1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 457 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), cint (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 457 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + 
keys: + 0 _col1 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0 + input vertices: + 1 Map 3 + Statistics: Num rows: 1005 Data size: 4021 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: tinyint) + 1 _col0 (type: tinyint) + outputColumnNames: _col0 + input vertices: + 1 Map 4 + Statistics: Num rows: 1105 Data size: 4423 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(), sum(_col0) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint), _col1 (type: bigint) + Execution mode: vectorized + Map 3 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Map 4 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Map-reduce partition columns: _col0 (type: tinyint) + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By 
Operator + aggregations: count(VALUE._col0), sum(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +108 1646 +PREHOOK: query: explain +select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cbigint = c.cbigint +) t1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cbigint = c.cbigint +) t1 +POSTHOOK: type: QUERY +STAGE 
DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 304 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int), cbigint (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 304 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col1 + input vertices: + 1 Map 3 + Statistics: Num rows: 1005 Data size: 4021 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col1 (type: bigint) + 1 _col0 (type: bigint) + outputColumnNames: _col1 + input vertices: + 1 Map 4 + Statistics: Num rows: 1105 Data size: 4423 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col1 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1105 Data size: 4423 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(), sum(_col0) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint), _col1 (type: bigint) + Execution mode: vectorized + Map 3 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator 
+ key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Map 4 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 457 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cbigint (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 457 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: bigint) + sort order: + + Map-reduce partition columns: _col0 (type: bigint) + Statistics: Num rows: 457 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0), sum(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cbigint = c.cbigint +) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint +from small_alltypesorc_a c +left outer join 
small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cbigint = c.cbigint +) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +105 -1197260954 +PREHOOK: query: explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 35 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int), cstring1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 35 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col1 + input vertices: + 1 Map 3 + Statistics: Num rows: 1005 Data size: 4021 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col1 (type: string) + 1 _col0 (type: string) + input vertices: + 1 Map 4 + Statistics: Num rows: 1105 Data size: 4423 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Map 3 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Map 4 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 36 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cstring1 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 36 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 36 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select 
count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +1005 +PREHOOK: query: explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 18 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cstring1 (type: string), cstring2 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 18 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col1 (type: string) + 1 _col0 (type: string) + outputColumnNames: _col0 + input vertices: + 1 Map 3 + Statistics: Num rows: 
39 Data size: 4021 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: string) + 1 _col0 (type: string) + input vertices: + 1 Map 4 + Statistics: Num rows: 42 Data size: 4423 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Map 3 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 36 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cstring2 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 36 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 36 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Map 4 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 36 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cstring1 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 36 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 36 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + 
compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +105 +PREHOOK: query: explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 and hd.cint = c.cint +) t1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 and hd.cint = c.cint +) t1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked 
pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 17 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int), cbigint (type: bigint), cstring1 (type: string), cstring2 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 17 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col3 (type: string), _col1 (type: bigint) + 1 _col1 (type: string), _col0 (type: bigint) + outputColumnNames: _col0, _col2 + input vertices: + 1 Map 3 + Statistics: Num rows: 36 Data size: 4021 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col2 (type: string), _col0 (type: int) + 1 _col1 (type: string), _col0 (type: int) + input vertices: + 1 Map 4 + Statistics: Num rows: 39 Data size: 4423 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Map 3 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 33 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cbigint (type: bigint), cstring2 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 33 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col1 (type: string), _col0 (type: bigint) + sort order: ++ + Map-reduce partition columns: _col1 (type: string), _col0 (type: bigint) + Statistics: Num rows: 33 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Map 4 + Map 
Operator Tree: + TableScan + alias: c + Statistics: Num rows: 35 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int), cstring1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 35 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col1 (type: string), _col0 (type: int) + sort order: ++ + Map-reduce partition columns: _col1 (type: string), _col0 (type: int) + Statistics: Num rows: 35 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 and hd.cint = c.cint +) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 and hd.cint = c.cint +) t1 +POSTHOOK: type: QUERY +POSTHOOK: 
Input: default@small_alltypesorc_a +#### A masked pattern was here #### +105 +PREHOOK: query: create table small_alltypesorc1b as select * from alltypesorc where cint is not null and ctinyint is not null limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc1b +POSTHOOK: query: create table small_alltypesorc1b as select * from alltypesorc where cint is not null and ctinyint is not null limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc1b +PREHOOK: query: create table small_alltypesorc2b as select * from alltypesorc where cint is null and ctinyint is not null limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc2b +POSTHOOK: query: create table small_alltypesorc2b as select * from alltypesorc where cint is null and ctinyint is not null limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc2b +PREHOOK: query: create table small_alltypesorc3b as select * from alltypesorc where cint is not null and ctinyint is null limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc3b +POSTHOOK: query: create table small_alltypesorc3b as select * from alltypesorc where cint is not null and ctinyint is null limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc3b +PREHOOK: query: create table small_alltypesorc4b as select * from alltypesorc where cint is null and ctinyint is null limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT 
+PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc4b +POSTHOOK: query: create table small_alltypesorc4b as select * from alltypesorc where cint is null and ctinyint is null limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc4b +PREHOOK: query: create table small_alltypesorc_b stored as orc as select * from +(select * from (select * from small_alltypesorc1b) sq1 + union all + select * from (select * from small_alltypesorc2b) sq2 + union all + select * from (select * from small_alltypesorc3b) sq3 + union all + select * from (select * from small_alltypesorc4b) sq4) q +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@small_alltypesorc1b +PREHOOK: Input: default@small_alltypesorc2b +PREHOOK: Input: default@small_alltypesorc3b +PREHOOK: Input: default@small_alltypesorc4b +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc_b +POSTHOOK: query: create table small_alltypesorc_b stored as orc as select * from +(select * from (select * from small_alltypesorc1b) sq1 + union all + select * from (select * from small_alltypesorc2b) sq2 + union all + select * from (select * from small_alltypesorc3b) sq3 + union all + select * from (select * from small_alltypesorc4b) sq4) q +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@small_alltypesorc1b +POSTHOOK: Input: default@small_alltypesorc2b +POSTHOOK: Input: default@small_alltypesorc3b +POSTHOOK: Input: default@small_alltypesorc4b +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc_b +PREHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_b +PREHOOK: Output: default@small_alltypesorc_b +POSTHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS +POSTHOOK: type: QUERY +POSTHOOK: Input: 
default@small_alltypesorc_b +POSTHOOK: Output: default@small_alltypesorc_b +PREHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS FOR COLUMNS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +POSTHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS FOR COLUMNS +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +PREHOOK: query: explain +select * +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 12 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 + Statistics: Num rows: 12 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col2 (type: int) + 1 _col2 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23 + input vertices: + 1 Map 2 + Statistics: Num rows: 13 
Data size: 4374 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 13 Data size: 4374 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 12 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 + Statistics: Num rows: 12 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col2 (type: int) + sort order: + + Map-reduce partition columns: _col2 (type: int) + Statistics: Num rows: 12 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean) + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select * +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select * +from small_alltypesorc_b c 
+left outer join small_alltypesorc_b cd + on cd.cint = c.cint +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +-10 -200 NULL -1818374653 -10.0 -200.0 NULL uFavNs7g58qrfyCH681d 1969-12-31 16:00:03.248 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +-21 -200 NULL NULL -21.0 -200.0 NULL NULL 1969-12-31 16:00:09.052 1969-12-31 15:59:55.451 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +-30 -200 NULL -815881183 -30.0 -200.0 NULL B0B5kG3OIl6C 1969-12-31 15:59:44.842 1969-12-31 15:59:55.451 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +-39 -200 NULL 944477914 -39.0 -200.0 NULL T8brJ213nd7rhW8XdnB1 1969-12-31 16:00:00.958 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +-63 -200 NULL 1927856372 -63.0 -200.0 NULL v6mk2b7oX 1969-12-31 16:00:06.852 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +16 5780 
528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true 
NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 
16:00:13.352 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 
16:00:12.752 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 
15:59:46.674 1969-12-31 16:00:08.875 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 
16:00:12.752 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +16 
5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 29 7021 528534767 NULL 29.0 
7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 
1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 
16:00:07.021 1969-12-31 16:00:02.997 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 
16:00:12.752 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 
15:59:51.665 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 
15:59:55.407 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +16 5780 528534767 NULL 
16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 
15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 
-4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL 
NULL 1969-12-31 16:00:13.839 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +16 5780 
528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 
1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true 
NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 13889 528534767 NULL NULL 13889.0 
cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true 
NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 
15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +3 -200 NULL -1438142492 3.0 -200.0 NULL T8Uakh8tudd1XRG5yKW8Y42H 1969-12-31 16:00:07.648 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +31 -200 NULL -245476731 31.0 -200.0 NULL 3E3BxP 1969-12-31 15:59:54.739 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +38 -200 NULL -1090414113 38.0 -200.0 NULL 5oQ43l 1969-12-31 16:00:05.478 1969-12-31 15:59:55.451 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +38 -200 NULL 1576772382 38.0 -200.0 NULL X3Ufbt46AUIfHe 1969-12-31 15:59:49.567 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +8 -200 NULL -1236645308 8.0 -200.0 NULL M28wJxOvunO3EImapE7OApQ 1969-12-31 15:59:46.007 1969-12-31 15:59:55.451 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true 
NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -28 -15813 
528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -34 15007 528534767 NULL -34.0 
15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 
1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 
1969-12-31 16:00:12.752 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL 3321 528534767 NULL NULL 3321.0 
cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true 
NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 
1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL 
+NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 
1969-12-31 15:59:52.967 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 61 -15549 
528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL 3321 528534767 NULL NULL 
3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL 3321 528534767 NULL NULL 
3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL 3321 528534767 NULL NULL 3321.0 
cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL 3321 528534767 NULL NULL 3321.0 
cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL 3321 528534767 NULL NULL 3321.0 
cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL 3321 528534767 NULL NULL 3321.0 
cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL 3321 528534767 NULL NULL 3321.0 
cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL 3321 528534767 NULL NULL 3321.0 
cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL 3321 528534767 NULL NULL 3321.0 
cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL 
NULL 1969-12-31 15:59:52.967 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +PREHOOK: query: explain +select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +PREHOOK: type: QUERY +POSTHOOK: query: explain +select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: tinyint) + 1 _col0 (type: tinyint) + outputColumnNames: _col0 + input vertices: + 1 Map 2 + Statistics: Num rows: 1093 Data size: 4374 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1093 Data size: 4374 Basic stats: COMPLETE Column stats: NONE + table: + input format: 
org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Map-reduce partition columns: _col0 (type: tinyint) + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +-10 +-11 +-21 +-28 +-30 +-34 +-39 +-50 +-63 +16 +27 +29 +3 +31 +31 +31 +31 +31 +31 +31 +31 +31 +38 +38 +38 +38 +61 +8 +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +PREHOOK: query: explain +select count(*) from (select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +) t1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) from (select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +left outer join small_alltypesorc_b hd 
+ on hd.ctinyint = c.ctinyint +) t1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 497 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), cint (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 497 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col1 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0 + input vertices: + 1 Map 3 + Statistics: Num rows: 1093 Data size: 4374 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: tinyint) + 1 _col0 (type: tinyint) + input vertices: + 1 Map 4 + Statistics: Num rows: 1202 Data size: 4811 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Map 3 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 994 Data size: 3977 Basic stats: 
COMPLETE Column stats: NONE + Execution mode: vectorized + Map 4 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Map-reduce partition columns: _col0 (type: tinyint) + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +414 diff --git 
ql/src/test/results/clientpositive/tez/vector_outer_join0.q.out ql/src/test/results/clientpositive/tez/vector_outer_join0.q.out new file mode 100644 index 0000000..d1ee177 --- /dev/null +++ ql/src/test/results/clientpositive/tez/vector_outer_join0.q.out @@ -0,0 +1,232 @@ +PREHOOK: query: CREATE TABLE orc_table_1(v1 STRING, a INT) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_table_1 +POSTHOOK: query: CREATE TABLE orc_table_1(v1 STRING, a INT) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_table_1 +PREHOOK: query: CREATE TABLE orc_table_2(c INT, v2 STRING) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_table_2 +POSTHOOK: query: CREATE TABLE orc_table_2(c INT, v2 STRING) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_table_2 +PREHOOK: query: insert into table orc_table_1 values ("", null),("one", 1),("one", 1),("two", 2),("three", 3),("", null) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@orc_table_1 +POSTHOOK: query: insert into table orc_table_1 values ("", null),("one", 1),("one", 1),("two", 2),("three", 3),("", null) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: default@orc_table_1 +POSTHOOK: Lineage: orc_table_1.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +POSTHOOK: Lineage: orc_table_1.v1 SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: insert into table orc_table_2 values (0, "ZERO"),(2, "TWO"), (3, "THREE"),(null, ""),(4, "FOUR"),(null, "") +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@orc_table_2 +POSTHOOK: query: insert 
into table orc_table_2 values (0, "ZERO"),(2, "TWO"), (3, "THREE"),(null, ""),(4, "FOUR"),(null, "") +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@orc_table_2 +POSTHOOK: Lineage: orc_table_2.c EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: orc_table_2.v2 SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: select * from orc_table_1 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1 +#### A masked pattern was here #### +POSTHOOK: query: select * from orc_table_1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1 +#### A masked pattern was here #### + NULL + NULL +one 1 +one 1 +three 3 +two 2 +PREHOOK: query: select * from orc_table_2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_2 +#### A masked pattern was here #### +POSTHOOK: query: select * from orc_table_2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_2 +#### A masked pattern was here #### +0 ZERO +2 TWO +3 THREE +4 FOUR +NULL +NULL +PREHOOK: query: explain +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: t1 + Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 a (type: int) + 1 c (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + input vertices: + 1 Map 2 + Statistics: 
Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Select Operator + expressions: _col0 (type: string), _col1 (type: int), _col5 (type: int), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: c (type: int) + sort order: + + Map-reduce partition columns: c (type: int) + Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE + value expressions: v2 (type: string) + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1 +PREHOOK: Input: default@orc_table_2 +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1 +POSTHOOK: Input: default@orc_table_2 +#### A masked pattern was here #### + NULL NULL NULL + NULL NULL NULL +one 1 NULL NULL +one 1 NULL NULL +three 3 3 THREE +two 2 2 TWO +PREHOOK: query: explain +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c +PREHOOK: type: QUERY +POSTHOOK: query: 
explain +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: t1 + Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: a (type: int) + sort order: + + Map-reduce partition columns: a (type: int) + Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE + value expressions: v1 (type: string) + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Right Outer Join0 to 1 + keys: + 0 a (type: int) + 1 c (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + input vertices: + 0 Map 1 + Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Select Operator + expressions: _col0 (type: string), _col1 (type: int), _col5 (type: int), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c 
+PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1 +PREHOOK: Input: default@orc_table_2 +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1 +POSTHOOK: Input: default@orc_table_2 +#### A masked pattern was here #### +NULL NULL 0 ZERO +NULL NULL 4 FOUR +NULL NULL NULL +NULL NULL NULL +three 3 3 THREE +two 2 2 TWO diff --git ql/src/test/results/clientpositive/tez/vector_outer_join1.q.out ql/src/test/results/clientpositive/tez/vector_outer_join1.q.out new file mode 100644 index 0000000..8368a77 --- /dev/null +++ ql/src/test/results/clientpositive/tez/vector_outer_join1.q.out @@ -0,0 +1,541 @@ +PREHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc1a +POSTHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc1a +PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc2a +POSTHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc2a +PREHOOK: query: create table 
small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc3a +POSTHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc3a +PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc4a +POSTHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc4a +PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from +(select * from (select * from small_alltypesorc1a) sq1 + union all + select * from (select * from small_alltypesorc2a) sq2 + union all + select * from (select * from small_alltypesorc3a) sq3 + union all + select * from (select * from small_alltypesorc4a) sq4) q +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@small_alltypesorc1a +PREHOOK: Input: default@small_alltypesorc2a +PREHOOK: Input: default@small_alltypesorc3a +PREHOOK: Input: default@small_alltypesorc4a +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc_a +POSTHOOK: query: create table small_alltypesorc_a stored as orc as select * from +(select * from (select * from small_alltypesorc1a) sq1 + union all + select * from (select * from small_alltypesorc2a) sq2 + union all + select * from 
(select * from small_alltypesorc3a) sq3 + union all + select * from (select * from small_alltypesorc4a) sq4) q +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@small_alltypesorc1a +POSTHOOK: Input: default@small_alltypesorc2a +POSTHOOK: Input: default@small_alltypesorc3a +POSTHOOK: Input: default@small_alltypesorc4a +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc_a +PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +PREHOOK: Output: default@small_alltypesorc_a +POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +POSTHOOK: Output: default@small_alltypesorc_a +PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +PREHOOK: query: explain +select * +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 11 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: 
string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 + Statistics: Num rows: 11 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col2 (type: int) + 1 _col2 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23 + input vertices: + 1 Map 2 + Statistics: Num rows: 12 Data size: 4021 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + File Output Operator + compressed: false + Statistics: Num rows: 12 Data size: 4021 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 11 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 + Statistics: Num rows: 11 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col2 (type: int) + sort order: + + Map-reduce partition columns: _col2 (type: int) + Statistics: Num rows: 11 Data size: 3656 Basic 
stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean) + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select * +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select * +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +-21 -200 NULL NULL -21.0 -200.0 NULL NULL 1969-12-31 16:00:09.052 1969-12-31 15:59:55.451 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 
16:00:15.007 1969-12-31 16:00:15.148 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL 
-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p 
NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 
16:00:13.839 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -4213 528534767 NULL NULL 
-4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +3 -200 NULL -1438142492 3.0 -200.0 NULL T8Uakh8tudd1XRG5yKW8Y42H 1969-12-31 16:00:07.648 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +31 -200 NULL -245476731 31.0 -200.0 NULL 3E3BxP 1969-12-31 15:59:54.739 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL 
NULL 1969-12-31 15:59:55.407 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +38 -200 NULL -1090414113 38.0 -200.0 NULL 5oQ43l 1969-12-31 16:00:05.478 1969-12-31 15:59:55.451 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +38 -200 NULL 1576772382 38.0 -200.0 NULL X3Ufbt46AUIfHe 1969-12-31 15:59:49.567 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 
1969-12-31 15:59:55.407 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 
1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p 
NULL NULL 1969-12-31 16:00:07.209 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 
1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p 
NULL NULL 1969-12-31 16:00:13.839 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +PREHOOK: query: explain +select c.ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +PREHOOK: type: QUERY 
+POSTHOOK: query: explain +select c.ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: tinyint) + 1 _col0 (type: tinyint) + outputColumnNames: _col0 + input vertices: + 1 Map 2 + Statistics: Num rows: 1005 Data size: 4021 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + File Output Operator + compressed: false + Statistics: Num rows: 1005 Data size: 4021 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Map-reduce partition columns: _col0 (type: tinyint) + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- 
SORT_QUERY_RESULTS + +select c.ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select c.ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +-21 +-28 +-34 +-50 +29 +3 +31 +31 +31 +31 +38 +38 +38 +38 +NULL +NULL +NULL +NULL +NULL +PREHOOK: query: explain +select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +) t1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +) t1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 457 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), cint (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 457 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col1 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0 + input vertices: + 1 Map 3 + Statistics: Num rows: 1005 Data size: 4021 Basic 
stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: tinyint) + 1 _col0 (type: tinyint) + outputColumnNames: _col0 + input vertices: + 1 Map 4 + Statistics: Num rows: 1105 Data size: 4423 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Group By Operator + aggregations: count(), sum(_col0) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint), _col1 (type: bigint) + Execution mode: vectorized + Map 3 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Map 4 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Map-reduce partition columns: _col0 (type: tinyint) + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0), sum(VALUE._col1) + mode: mergepartial + outputColumnNames: 
_col0, _col1 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +118 -14 diff --git ql/src/test/results/clientpositive/tez/vector_outer_join2.q.out ql/src/test/results/clientpositive/tez/vector_outer_join2.q.out new file mode 100644 index 0000000..8d38477 --- /dev/null +++ ql/src/test/results/clientpositive/tez/vector_outer_join2.q.out @@ -0,0 +1,238 @@ +PREHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc1a +POSTHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and 
ctinyint is not null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc1a +PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc2a +POSTHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc2a +PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc3a +POSTHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc3a +PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc4a +POSTHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc4a +PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from 
+(select * from (select * from small_alltypesorc1a) sq1 + union all + select * from (select * from small_alltypesorc2a) sq2 + union all + select * from (select * from small_alltypesorc3a) sq3 + union all + select * from (select * from small_alltypesorc4a) sq4) q +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@small_alltypesorc1a +PREHOOK: Input: default@small_alltypesorc2a +PREHOOK: Input: default@small_alltypesorc3a +PREHOOK: Input: default@small_alltypesorc4a +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc_a +POSTHOOK: query: create table small_alltypesorc_a stored as orc as select * from +(select * from (select * from small_alltypesorc1a) sq1 + union all + select * from (select * from small_alltypesorc2a) sq2 + union all + select * from (select * from small_alltypesorc3a) sq3 + union all + select * from (select * from small_alltypesorc4a) sq4) q +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@small_alltypesorc1a +POSTHOOK: Input: default@small_alltypesorc2a +POSTHOOK: Input: default@small_alltypesorc3a +POSTHOOK: Input: default@small_alltypesorc4a +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc_a +PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +PREHOOK: Output: default@small_alltypesorc_a +POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +POSTHOOK: Output: default@small_alltypesorc_a +PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +PREHOOK: query: explain +select 
count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cbigint = c.cbigint +) t1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cbigint = c.cbigint +) t1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 304 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int), cbigint (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 304 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col1 + input vertices: + 1 Map 3 + Statistics: Num rows: 1005 Data size: 4021 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col1 (type: bigint) + 1 _col0 (type: bigint) + outputColumnNames: _col1 + input vertices: + 1 Map 4 + Statistics: Num rows: 1105 Data size: 4423 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Select Operator + expressions: _col1 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1105 Data size: 4423 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(), sum(_col0) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num 
rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint), _col1 (type: bigint) + Execution mode: vectorized + Map 3 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Map 4 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 457 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cbigint (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 457 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: bigint) + sort order: + + Map-reduce partition columns: _col0 (type: bigint) + Statistics: Num rows: 457 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0), sum(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch 
Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cbigint = c.cbigint +) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cbigint = c.cbigint +) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +105 -1197260954 diff --git ql/src/test/results/clientpositive/tez/vector_outer_join3.q.out ql/src/test/results/clientpositive/tez/vector_outer_join3.q.out new file mode 100644 index 0000000..b45192c --- /dev/null +++ ql/src/test/results/clientpositive/tez/vector_outer_join3.q.out @@ -0,0 +1,527 @@ +PREHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc1a +POSTHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc1a +PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc2a 
+POSTHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc2a +PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc3a +POSTHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc3a +PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc4a +POSTHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc4a +PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from +(select * from (select * from small_alltypesorc1a) sq1 + union all + select * from (select * from small_alltypesorc2a) sq2 + union all + select * from (select * from small_alltypesorc3a) sq3 + union all + select * from (select * from small_alltypesorc4a) sq4) q +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@small_alltypesorc1a +PREHOOK: Input: default@small_alltypesorc2a +PREHOOK: Input: default@small_alltypesorc3a +PREHOOK: Input: 
default@small_alltypesorc4a +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc_a +POSTHOOK: query: create table small_alltypesorc_a stored as orc as select * from +(select * from (select * from small_alltypesorc1a) sq1 + union all + select * from (select * from small_alltypesorc2a) sq2 + union all + select * from (select * from small_alltypesorc3a) sq3 + union all + select * from (select * from small_alltypesorc4a) sq4) q +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@small_alltypesorc1a +POSTHOOK: Input: default@small_alltypesorc2a +POSTHOOK: Input: default@small_alltypesorc3a +POSTHOOK: Input: default@small_alltypesorc4a +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc_a +PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +PREHOOK: Output: default@small_alltypesorc_a +POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +POSTHOOK: Output: default@small_alltypesorc_a +PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +PREHOOK: query: explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 
+POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 35 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int), cstring1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 35 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col1 + input vertices: + 1 Map 3 + Statistics: Num rows: 1005 Data size: 4021 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col1 (type: string) + 1 _col0 (type: string) + input vertices: + 1 Map 4 + Statistics: Num rows: 1105 Data size: 4423 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Map 3 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 914 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 914 Data size: 3656 Basic 
stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Map 4 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 36 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cstring1 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 36 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 36 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +1005 +PREHOOK: query: 
explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 18 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cstring1 (type: string), cstring2 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 18 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col1 (type: string) + 1 _col0 (type: string) + outputColumnNames: _col0 + input vertices: + 1 Map 3 + Statistics: Num rows: 39 Data size: 4021 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: string) + 1 _col0 (type: string) + input vertices: + 1 Map 4 + Statistics: Num rows: 42 Data size: 4423 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + 
Execution mode: vectorized + Map 3 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 36 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cstring2 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 36 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 36 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Map 4 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 36 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cstring1 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 36 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 36 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 +left outer join small_alltypesorc_a 
hd + on hd.cstring1 = c.cstring1 +) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +105 +PREHOOK: query: explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 and hd.cint = c.cint +) t1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 and hd.cint = c.cint +) t1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 17 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int), cbigint (type: bigint), cstring1 (type: string), cstring2 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 17 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col3 (type: string), _col1 (type: bigint) + 1 _col1 (type: string), _col0 (type: bigint) + outputColumnNames: _col0, _col2 + 
input vertices: + 1 Map 3 + Statistics: Num rows: 36 Data size: 4021 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col2 (type: string), _col0 (type: int) + 1 _col1 (type: string), _col0 (type: int) + input vertices: + 1 Map 4 + Statistics: Num rows: 39 Data size: 4423 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Map 3 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 33 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cbigint (type: bigint), cstring2 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 33 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col1 (type: string), _col0 (type: bigint) + sort order: ++ + Map-reduce partition columns: _col1 (type: string), _col0 (type: bigint) + Statistics: Num rows: 33 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Map 4 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 35 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int), cstring1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 35 Data size: 3656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col1 (type: string), _col0 (type: int) + sort order: ++ + Map-reduce partition columns: _col1 (type: string), _col0 (type: int) + Statistics: Num rows: 35 Data size: 3656 Basic stats: COMPLETE Column stats: 
NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 and hd.cint = c.cint +) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 and hd.cint = c.cint +) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +105 diff --git ql/src/test/results/clientpositive/tez/vector_outer_join4.q.out ql/src/test/results/clientpositive/tez/vector_outer_join4.q.out new file mode 100644 index 0000000..6dce642 --- /dev/null +++ ql/src/test/results/clientpositive/tez/vector_outer_join4.q.out @@ -0,0 +1,864 @@ +PREHOOK: query: create table small_alltypesorc1b as select * from alltypesorc where cint is not null and ctinyint is not null limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: 
default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc1b +POSTHOOK: query: create table small_alltypesorc1b as select * from alltypesorc where cint is not null and ctinyint is not null limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc1b +PREHOOK: query: create table small_alltypesorc2b as select * from alltypesorc where cint is null and ctinyint is not null limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc2b +POSTHOOK: query: create table small_alltypesorc2b as select * from alltypesorc where cint is null and ctinyint is not null limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc2b +PREHOOK: query: create table small_alltypesorc3b as select * from alltypesorc where cint is not null and ctinyint is null limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc3b +POSTHOOK: query: create table small_alltypesorc3b as select * from alltypesorc where cint is not null and ctinyint is null limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc3b +PREHOOK: query: create table small_alltypesorc4b as select * from alltypesorc where cint is null and ctinyint is null limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc4b +POSTHOOK: query: create table small_alltypesorc4b as select * from alltypesorc where cint is null and ctinyint is null limit 10 +POSTHOOK: type: 
CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc4b +PREHOOK: query: create table small_alltypesorc_b stored as orc as select * from +(select * from (select * from small_alltypesorc1b) sq1 + union all + select * from (select * from small_alltypesorc2b) sq2 + union all + select * from (select * from small_alltypesorc3b) sq3 + union all + select * from (select * from small_alltypesorc4b) sq4) q +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@small_alltypesorc1b +PREHOOK: Input: default@small_alltypesorc2b +PREHOOK: Input: default@small_alltypesorc3b +PREHOOK: Input: default@small_alltypesorc4b +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc_b +POSTHOOK: query: create table small_alltypesorc_b stored as orc as select * from +(select * from (select * from small_alltypesorc1b) sq1 + union all + select * from (select * from small_alltypesorc2b) sq2 + union all + select * from (select * from small_alltypesorc3b) sq3 + union all + select * from (select * from small_alltypesorc4b) sq4) q +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@small_alltypesorc1b +POSTHOOK: Input: default@small_alltypesorc2b +POSTHOOK: Input: default@small_alltypesorc3b +POSTHOOK: Input: default@small_alltypesorc4b +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc_b +PREHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_b +PREHOOK: Output: default@small_alltypesorc_b +POSTHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_b +POSTHOOK: Output: default@small_alltypesorc_b +PREHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS FOR COLUMNS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### 
+POSTHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS FOR COLUMNS +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +PREHOOK: query: explain +select * +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 12 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 + Statistics: Num rows: 12 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col2 (type: int) + 1 _col2 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23 + input vertices: + 1 Map 2 + Statistics: Num rows: 13 Data size: 4374 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + File Output Operator + compressed: false + Statistics: Num rows: 13 Data size: 4374 Basic stats: COMPLETE Column stats: NONE + table: + input format: 
org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 12 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 + Statistics: Num rows: 12 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col2 (type: int) + sort order: + + Map-reduce partition columns: _col2 (type: int) + Statistics: Num rows: 12 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean) + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select * +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select * +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +-10 -200 NULL -1818374653 -10.0 -200.0 
NULL uFavNs7g58qrfyCH681d 1969-12-31 16:00:03.248 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 
cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 
16:00:07.787 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +-21 -200 NULL NULL -21.0 -200.0 NULL NULL 1969-12-31 16:00:09.052 1969-12-31 15:59:55.451 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 
15:59:55.787 1969-12-31 16:00:01.546 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 
16:00:07.209 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +-30 -200 NULL -815881183 -30.0 -200.0 NULL B0B5kG3OIl6C 1969-12-31 15:59:44.842 1969-12-31 15:59:55.451 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +-34 
15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 
16:00:13.352 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 
1969-12-31 16:00:13.352 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +-39 -200 NULL 944477914 -39.0 -200.0 NULL T8brJ213nd7rhW8XdnB1 1969-12-31 16:00:00.958 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +-50 -13326 
528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -6147 528534767 NULL NULL -6147.0 
cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +-63 -200 NULL 1927856372 -63.0 -200.0 NULL v6mk2b7oX 1969-12-31 16:00:06.852 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 
1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 
16:00:12.752 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL -11 -15431 528534767 NULL -11.0 
-15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +27 -7824 528534767 NULL 
27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 
cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 
16:00:15.007 1969-12-31 16:00:15.148 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p 
NULL NULL 1969-12-31 16:00:06.913 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +3 -200 NULL -1438142492 3.0 -200.0 NULL T8Uakh8tudd1XRG5yKW8Y42H 1969-12-31 16:00:07.648 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +31 -200 NULL -245476731 31.0 -200.0 NULL 3E3BxP 1969-12-31 15:59:54.739 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +31 -9566 528534767 NULL 
31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -3012 528534767 NULL NULL -3012.0 
cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 
15:59:52.176 1969-12-31 16:00:07.787 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 
1969-12-31 16:00:02.997 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL 
NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +38 -200 NULL -1090414113 38.0 -200.0 NULL 5oQ43l 1969-12-31 16:00:05.478 1969-12-31 15:59:55.451 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +38 -200 NULL 1576772382 38.0 -200.0 NULL X3Ufbt46AUIfHe 1969-12-31 15:59:49.567 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL 27 -7824 528534767 NULL 
27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 
15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +8 -200 NULL -1236645308 8.0 -200.0 NULL M28wJxOvunO3EImapE7OApQ 1969-12-31 15:59:46.007 1969-12-31 15:59:55.451 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL -34 15007 528534767 
NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -3012 528534767 NULL NULL -3012.0 
cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 
true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -11534 528534767 
NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL -11 -15431 
528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 
cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL 
3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p 
NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL 
13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 
1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true 
NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 
16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 
true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 
1969-12-31 15:59:56.474 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 
16:00:11.525 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 
1969-12-31 16:00:12.752 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 
1969-12-31 16:00:03.151 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 
16:00:08.875 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 
1969-12-31 16:00:00.423 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL 
+NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 
15:59:52.967 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +PREHOOK: query: explain +select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +PREHOOK: type: QUERY +POSTHOOK: query: explain +select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 2 
(BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: tinyint) + 1 _col0 (type: tinyint) + outputColumnNames: _col0 + input vertices: + 1 Map 2 + Statistics: Num rows: 1093 Data size: 4374 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + File Output Operator + compressed: false + Statistics: Num rows: 1093 Data size: 4374 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Map-reduce partition columns: _col0 (type: tinyint) + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select c.ctinyint +from 
small_alltypesorc_b c +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +-10 +-11 +-21 +-28 +-30 +-34 +-39 +-50 +-63 +16 +27 +29 +3 +31 +31 +31 +31 +31 +31 +31 +31 +31 +38 +38 +38 +38 +61 +8 +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +PREHOOK: query: explain +select count(*) from (select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +) t1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) from (select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +) t1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 497 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), cint (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 497 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col1 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0 + input vertices: + 1 Map 3 + Statistics: Num rows: 1093 Data size: 4374 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: tinyint) + 1 _col0 (type: tinyint) + input vertices: + 1 Map 4 + Statistics: Num rows: 1202 Data size: 4811 Basic stats: COMPLETE Column 
stats: NONE + HybridGraceHashJoin: true + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Map 3 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Map 4 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Map-reduce partition columns: _col0 (type: tinyint) + Statistics: Num rows: 994 Data size: 3977 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +494 diff --git ql/src/test/results/clientpositive/tez/vectorized_parquet_types.q.out ql/src/test/results/clientpositive/tez/vectorized_parquet_types.q.out new file mode 100644 index 0000000..63e1e11 --- /dev/null +++ ql/src/test/results/clientpositive/tez/vectorized_parquet_types.q.out @@ -0,0 +1,347 @@ +PREHOOK: query: DROP TABLE parquet_types_staging +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE parquet_types_staging +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE parquet_types +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE parquet_types +POSTHOOK: type: DROPTABLE +PREHOOK: query: -- init +CREATE TABLE parquet_types_staging ( + cint int, + ctinyint tinyint, + csmallint smallint, + cfloat float, + cdouble double, + cstring1 string, + t timestamp, + cchar char(5), + cvarchar varchar(10), + cbinary string, + m1 map, + l1 array, + st1 struct, + d date, + cdecimal decimal(4,2) +) ROW FORMAT DELIMITED +FIELDS TERMINATED BY '|' +COLLECTION ITEMS TERMINATED BY ',' +MAP KEYS TERMINATED BY ':' +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: 
default@parquet_types_staging +POSTHOOK: query: -- init +CREATE TABLE parquet_types_staging ( + cint int, + ctinyint tinyint, + csmallint smallint, + cfloat float, + cdouble double, + cstring1 string, + t timestamp, + cchar char(5), + cvarchar varchar(10), + cbinary string, + m1 map, + l1 array, + st1 struct, + d date, + cdecimal decimal(4,2) +) ROW FORMAT DELIMITED +FIELDS TERMINATED BY '|' +COLLECTION ITEMS TERMINATED BY ',' +MAP KEYS TERMINATED BY ':' +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@parquet_types_staging +PREHOOK: query: CREATE TABLE parquet_types ( + cint int, + ctinyint tinyint, + csmallint smallint, + cfloat float, + cdouble double, + cstring1 string, + t timestamp, + cchar char(5), + cvarchar varchar(10), + cbinary binary, + cdecimal decimal(4,2) +) STORED AS PARQUET +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@parquet_types +POSTHOOK: query: CREATE TABLE parquet_types ( + cint int, + ctinyint tinyint, + csmallint smallint, + cfloat float, + cdouble double, + cstring1 string, + t timestamp, + cchar char(5), + cvarchar varchar(10), + cbinary binary, + cdecimal decimal(4,2) +) STORED AS PARQUET +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@parquet_types +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@parquet_types_staging +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@parquet_types_staging +PREHOOK: query: INSERT OVERWRITE TABLE parquet_types +SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar, +unhex(cbinary), cdecimal FROM parquet_types_staging +PREHOOK: type: QUERY 
+PREHOOK: Input: default@parquet_types_staging +PREHOOK: Output: default@parquet_types +POSTHOOK: query: INSERT OVERWRITE TABLE parquet_types +SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar, +unhex(cbinary), cdecimal FROM parquet_types_staging +POSTHOOK: type: QUERY +POSTHOOK: Input: default@parquet_types_staging +POSTHOOK: Output: default@parquet_types +POSTHOOK: Lineage: parquet_types.cbinary EXPRESSION [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cbinary, type:string, comment:null), ] +POSTHOOK: Lineage: parquet_types.cchar SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cchar, type:char(5), comment:null), ] +POSTHOOK: Lineage: parquet_types.cdecimal SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cdecimal, type:decimal(4,2), comment:null), ] +POSTHOOK: Lineage: parquet_types.cdouble SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: parquet_types.cfloat SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cfloat, type:float, comment:null), ] +POSTHOOK: Lineage: parquet_types.cint SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: parquet_types.csmallint SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:csmallint, type:smallint, comment:null), ] +POSTHOOK: Lineage: parquet_types.cstring1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cstring1, type:string, comment:null), ] +POSTHOOK: Lineage: parquet_types.ctinyint SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] +POSTHOOK: Lineage: parquet_types.cvarchar SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cvarchar, type:varchar(10), comment:null), ] +POSTHOOK: Lineage: parquet_types.t SIMPLE 
[(parquet_types_staging)parquet_types_staging.FieldSchema(name:t, type:timestamp, comment:null), ] +PREHOOK: query: -- select +explain +SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar, +hex(cbinary), cdecimal FROM parquet_types +PREHOOK: type: QUERY +POSTHOOK: query: -- select +explain +SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar, +hex(cbinary), cdecimal FROM parquet_types +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: parquet_types + Select Operator + expressions: cint (type: int), ctinyint (type: tinyint), csmallint (type: smallint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), t (type: timestamp), cchar (type: char(5)), cvarchar (type: varchar(10)), hex(cbinary) (type: string), cdecimal (type: decimal(4,2)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 + ListSink + +PREHOOK: query: SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar, +hex(cbinary), cdecimal FROM parquet_types +PREHOOK: type: QUERY +PREHOOK: Input: default@parquet_types +#### A masked pattern was here #### +POSTHOOK: query: SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar, +hex(cbinary), cdecimal FROM parquet_types +POSTHOOK: type: QUERY +POSTHOOK: Input: default@parquet_types +#### A masked pattern was here #### +100 1 1 1.0 0.0 abc 2011-01-01 01:01:01.111111111 a a B4F3CAFDBEDD 48.88 +101 2 2 1.1 0.3 def 2012-02-02 02:02:02.222222222 ab ab 68692CCAC0BDE7 8.72 +102 3 3 1.2 0.6 ghi 2013-03-03 03:03:03.333333333 abc abc B4F3CAFDBEDD 90.21 +103 1 4 1.3 0.9 jkl 2014-04-04 04:04:04.444444444 abcd abcd 68692CCAC0BDE7 3.89 +104 2 5 1.4 1.2 mno 2015-05-05 05:05:05.555555555 abcde abcde B4F3CAFDBEDD 56.23 +105 3 1 1.0 1.5 pqr 2016-06-06 06:06:06.666666666 abcde abcdef 
68692CCAC0BDE7 90.21 +106 1 2 1.1 1.8 stu 2017-07-07 07:07:07.777777777 abcde abcdefg B4F3CAFDBEDD 6.09 +107 2 3 1.2 2.1 vwx 2018-08-08 08:08:08.888888888 bcdef abcdefgh 68692CCAC0BDE7 9.44 +108 3 4 1.3 2.4 yza 2019-09-09 09:09:09.999999999 cdefg B4F3CAFDBE 68656C6C6F 77.54 +109 1 5 1.4 2.7 bcd 2020-10-10 10:10:10.101010101 klmno abcdedef 68692CCAC0BDE7 25.42 +110 2 1 1.0 3.0 efg 2021-11-11 11:11:11.111111111 pqrst abcdede B4F3CAFDBEDD 60.12 +111 3 2 1.1 3.3 hij 2022-12-12 12:12:12.121212121 nopqr abcded 68692CCAC0BDE7 49.56 +112 1 3 1.2 3.6 klm 2023-01-02 13:13:13.131313131 opqrs abcdd B4F3CAFDBEDD 80.76 +113 2 4 1.3 3.9 nop 2024-02-02 14:14:14.141414141 pqrst abc 68692CCAC0BDE7 23.23 +114 3 5 1.4 4.2 qrs 2025-03-03 15:15:15.151515151 qrstu b B4F3CAFDBEDD 1.01 +115 1 1 1.0 4.5 qrs 2026-04-04 16:16:16.161616161 rstuv abcded 68692CCAC0BDE7 5.98 +116 2 2 1.1 4.8 wxy 2027-05-05 17:17:17.171717171 stuvw abcded B4F3CAFDBEDD 11.22 +117 3 3 1.2 5.1 zab 2028-06-06 18:18:18.181818181 tuvwx abcded 68692CCAC0BDE7 9.88 +118 1 4 1.3 5.4 cde 2029-07-07 19:19:19.191919191 uvwzy abcdede B4F3CAFDBEDD 4.76 +119 2 5 1.4 5.7 fgh 2030-08-08 20:20:20.202020202 vwxyz abcdede 68692CCAC0BDE7 12.83 +120 3 1 1.0 6.0 ijk 2031-09-09 21:21:21.212121212 wxyza abcde B4F3CAFDBEDD 73.04 +121 1 2 1.1 6.3 lmn 2032-10-10 22:22:22.222222222 bcdef abcde 90.33 +PREHOOK: query: explain +SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar), cdecimal, SIGN(cdecimal) FROM parquet_types +PREHOOK: type: QUERY +POSTHOOK: query: explain +SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar), cdecimal, SIGN(cdecimal) FROM parquet_types +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: parquet_types + Select Operator + expressions: cchar (type: char(5)), length(cchar) (type: int), cvarchar (type: varchar(10)), length(cvarchar) (type: int), cdecimal (type: decimal(4,2)), sign(cdecimal) 
(type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + ListSink + +PREHOOK: query: SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar), cdecimal, SIGN(cdecimal) FROM parquet_types +PREHOOK: type: QUERY +PREHOOK: Input: default@parquet_types +#### A masked pattern was here #### +POSTHOOK: query: SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar), cdecimal, SIGN(cdecimal) FROM parquet_types +POSTHOOK: type: QUERY +POSTHOOK: Input: default@parquet_types +#### A masked pattern was here #### +a 1 a 3 48.88 1 +ab 2 ab 3 8.72 1 +abc 3 abc 3 90.21 1 +abcd 4 abcd 4 3.89 1 +abcde 5 abcde 5 56.23 1 +abcde 5 abcdef 6 90.21 1 +abcde 5 abcdefg 7 6.09 1 +bcdef 5 abcdefgh 8 9.44 1 +cdefg 5 B4F3CAFDBE 10 77.54 1 +klmno 5 abcdedef 8 25.42 1 +pqrst 5 abcdede 7 60.12 1 +nopqr 5 abcded 6 49.56 1 +opqrs 5 abcdd 5 80.76 1 +pqrst 5 abc 3 23.23 1 +qrstu 5 b 1 1.01 1 +rstuv 5 abcded 6 5.98 1 +stuvw 5 abcded 6 11.22 1 +tuvwx 5 abcded 6 9.88 1 +uvwzy 5 abcdede 7 4.76 1 +vwxyz 5 abcdede 7 12.83 1 +wxyza 5 abcde 5 73.04 1 +bcdef 5 abcde 5 90.33 1 +PREHOOK: query: explain +SELECT ctinyint, + MAX(cint), + MIN(csmallint), + COUNT(cstring1), + AVG(cfloat), + STDDEV_POP(cdouble), + MAX(cdecimal) +FROM parquet_types +GROUP BY ctinyint +ORDER BY ctinyint +PREHOOK: type: QUERY +POSTHOOK: query: explain +SELECT ctinyint, + MAX(cint), + MIN(csmallint), + COUNT(cstring1), + AVG(cfloat), + STDDEV_POP(cdouble), + MAX(cdecimal) +FROM parquet_types +GROUP BY ctinyint +ORDER BY ctinyint +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: parquet_types + Statistics: Num rows: 22 Data size: 242 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), cint (type: int), 
csmallint (type: smallint), cstring1 (type: string), cfloat (type: float), cdouble (type: double), cdecimal (type: decimal(4,2)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 22 Data size: 242 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: max(_col1), min(_col2), count(_col3), avg(_col4), stddev_pop(_col5), max(_col6) + keys: _col0 (type: tinyint) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 22 Data size: 242 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Map-reduce partition columns: _col0 (type: tinyint) + Statistics: Num rows: 22 Data size: 242 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: int), _col2 (type: smallint), _col3 (type: bigint), _col4 (type: struct), _col5 (type: struct), _col6 (type: decimal(4,2)) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0), min(VALUE._col1), count(VALUE._col2), avg(VALUE._col3), stddev_pop(VALUE._col4), max(VALUE._col5) + keys: KEY._col0 (type: tinyint) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 11 Data size: 121 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Statistics: Num rows: 11 Data size: 121 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: int), _col2 (type: smallint), _col3 (type: bigint), _col4 (type: double), _col5 (type: double), _col6 (type: decimal(4,2)) + Reducer 3 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: tinyint), VALUE._col0 (type: int), VALUE._col1 (type: smallint), VALUE._col2 (type: bigint), VALUE._col3 (type: double), VALUE._col4 (type: double), VALUE._col5 (type: decimal(4,2)) + outputColumnNames: _col0, _col1, 
_col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 11 Data size: 121 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 11 Data size: 121 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT ctinyint, + MAX(cint), + MIN(csmallint), + COUNT(cstring1), + AVG(cfloat), + STDDEV_POP(cdouble), + MAX(cdecimal) +FROM parquet_types +GROUP BY ctinyint +ORDER BY ctinyint +PREHOOK: type: QUERY +PREHOOK: Input: default@parquet_types +#### A masked pattern was here #### +POSTHOOK: query: SELECT ctinyint, + MAX(cint), + MIN(csmallint), + COUNT(cstring1), + AVG(cfloat), + STDDEV_POP(cdouble), + MAX(cdecimal) +FROM parquet_types +GROUP BY ctinyint +ORDER BY ctinyint +POSTHOOK: type: QUERY +POSTHOOK: Input: default@parquet_types +#### A masked pattern was here #### +1 121 1 8 1.1749999970197678 2.0621590627301285 90.33 +2 119 1 7 1.2142857142857142 1.8 60.12 +3 120 1 7 1.171428578240531 1.7999999999999996 90.21 diff --git ql/src/test/results/clientpositive/vector_aggregate_9.q.out ql/src/test/results/clientpositive/vector_aggregate_9.q.out index 7c010a3..f81816c 100644 --- ql/src/test/results/clientpositive/vector_aggregate_9.q.out +++ ql/src/test/results/clientpositive/vector_aggregate_9.q.out @@ -152,11 +152,15 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc +PREHOOK: query: -- SORT_QUERY_RESULTS + +select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc PREHOOK: type: QUERY PREHOOK: Input: default@vectortab2korc #### A masked pattern was here #### -POSTHOOK: query: select min(dc), max(dc), sum(dc), avg(dc) from 
vectortab2korc +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc POSTHOOK: type: QUERY POSTHOOK: Input: default@vectortab2korc #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out index 824944a..744bfb3 100644 --- ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out +++ ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out @@ -197,18 +197,22 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from char_join1_vc1_orc a join char_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1 +PREHOOK: query: -- SORT_QUERY_RESULTS + +select * from char_join1_vc1_orc a join char_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1 PREHOOK: type: QUERY PREHOOK: Input: default@char_join1_vc1_orc #### A masked pattern was here #### -POSTHOOK: query: select * from char_join1_vc1_orc a join char_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1 +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select * from char_join1_vc1_orc a join char_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1 POSTHOOK: type: QUERY POSTHOOK: Input: default@char_join1_vc1_orc #### A masked pattern was here #### -1 abc 2 abc 1 abc 1 abc -2 abc 2 abc +1 abc 2 abc 2 abc 1 abc +2 abc 2 abc 3 abc 3 abc PREHOOK: query: -- Join char with different length char explain select * from char_join1_vc1_orc a join char_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1 @@ -289,20 +293,24 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from char_join1_vc1_orc a join char_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1 +PREHOOK: query: -- SORT_QUERY_RESULTS + +select * from char_join1_vc1_orc a join char_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1 PREHOOK: type: QUERY PREHOOK: Input: default@char_join1_vc1_orc PREHOOK: Input: default@char_join1_vc2_orc #### A masked pattern was here #### -POSTHOOK: query: select * from 
char_join1_vc1_orc a join char_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1 +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select * from char_join1_vc1_orc a join char_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1 POSTHOOK: type: QUERY POSTHOOK: Input: default@char_join1_vc1_orc POSTHOOK: Input: default@char_join1_vc2_orc #### A masked pattern was here #### -1 abc 2 abc 1 abc 1 abc -2 abc 2 abc +1 abc 2 abc 2 abc 1 abc +2 abc 2 abc 3 abc 3 abc PREHOOK: query: -- Join char with string explain select * from char_join1_vc1_orc a join char_join1_str_orc b on (a.c2 = b.c2) order by a.c1 @@ -382,12 +390,16 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from char_join1_vc1_orc a join char_join1_str_orc b on (a.c2 = b.c2) order by a.c1 +PREHOOK: query: -- SORT_QUERY_RESULTS + +select * from char_join1_vc1_orc a join char_join1_str_orc b on (a.c2 = b.c2) order by a.c1 PREHOOK: type: QUERY PREHOOK: Input: default@char_join1_str_orc PREHOOK: Input: default@char_join1_vc1_orc #### A masked pattern was here #### -POSTHOOK: query: select * from char_join1_vc1_orc a join char_join1_str_orc b on (a.c2 = b.c2) order by a.c1 +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select * from char_join1_vc1_orc a join char_join1_str_orc b on (a.c2 = b.c2) order by a.c1 POSTHOOK: type: QUERY POSTHOOK: Input: default@char_join1_str_orc POSTHOOK: Input: default@char_join1_vc1_orc diff --git ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out index 03eb445..2b4348b 100644 --- ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out +++ ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out @@ -1,34 +1,82 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE decimal_mapjoin STORED AS ORC AS - SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, - CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, - cint - FROM alltypesorc -PREHOOK: type: 
CREATETABLE_AS_SELECT -PREHOOK: Input: default@alltypesorc +PREHOOK: query: CREATE TABLE over1k(t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + bo boolean, + s string, + ts timestamp, + dec decimal(4,2), + bin binary) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@decimal_mapjoin -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE decimal_mapjoin STORED AS ORC AS - SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, - CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, - cint - FROM alltypesorc -POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@alltypesorc +PREHOOK: Output: default@over1k +POSTHOOK: query: CREATE TABLE over1k(t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + bo boolean, + s string, + ts timestamp, + dec decimal(4,2), + bin binary) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@over1k +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@over1k +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@over1k +PREHOOK: query: CREATE TABLE t1(dec decimal(4,2)) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t1 +POSTHOOK: query: CREATE TABLE t1(dec decimal(4,2)) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t1 +PREHOOK: query: INSERT INTO TABLE t1 select dec from over1k +PREHOOK: type: QUERY +PREHOOK: Input: default@over1k +PREHOOK: Output: default@t1 +POSTHOOK: query: INSERT INTO TABLE t1 
select dec from over1k +POSTHOOK: type: QUERY +POSTHOOK: Input: default@over1k +POSTHOOK: Output: default@t1 +POSTHOOK: Lineage: t1.dec SIMPLE [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ] +PREHOOK: query: CREATE TABLE t2(dec decimal(4,0)) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t2 +POSTHOOK: query: CREATE TABLE t2(dec decimal(4,0)) STORED AS ORC +POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@decimal_mapjoin -PREHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 - FROM decimal_mapjoin l - JOIN decimal_mapjoin r ON l.cint = r.cint - WHERE l.cint = 6981 +POSTHOOK: Output: default@t2 +PREHOOK: query: INSERT INTO TABLE t2 select dec from over1k +PREHOOK: type: QUERY +PREHOOK: Input: default@over1k +PREHOOK: Output: default@t2 +POSTHOOK: query: INSERT INTO TABLE t2 select dec from over1k +POSTHOOK: type: QUERY +POSTHOOK: Input: default@over1k +POSTHOOK: Output: default@t2 +POSTHOOK: Lineage: t2.dec EXPRESSION [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ] +PREHOOK: query: explain +select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 - FROM decimal_mapjoin l - JOIN decimal_mapjoin r ON l.cint = r.cint - WHERE l.cint = 6981 +POSTHOOK: query: explain +select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -39,46 +87,46 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - l + t2 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - l + t2 TableScan - alias: l - Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE + alias: t2 + Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (cint = 6981) (type: 
boolean) - Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE + predicate: dec is not null (type: boolean) + Statistics: Num rows: 525 Data size: 58800 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator keys: - 0 6981 (type: int) - 1 6981 (type: int) + 0 dec (type: decimal(6,2)) + 1 dec (type: decimal(6,2)) Stage: Stage-3 Map Reduce Map Operator Tree: TableScan - alias: r - Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE + alias: t1 + Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (cint = 6981) (type: boolean) - Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE + predicate: dec is not null (type: boolean) + Statistics: Num rows: 525 Data size: 58800 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 keys: - 0 6981 (type: int) - 1 6981 (type: int) - outputColumnNames: _col1, _col9 - Statistics: Num rows: 6758 Data size: 1190783 Basic stats: COMPLETE Column stats: NONE + 0 dec (type: decimal(6,2)) + 1 dec (type: decimal(6,2)) + outputColumnNames: _col0, _col4 + Statistics: Num rows: 577 Data size: 64680 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 6981 (type: int), 6981 (type: int), _col1 (type: decimal(20,10)), _col9 (type: decimal(23,14)) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 6758 Data size: 1190783 Basic stats: COMPLETE Column stats: NONE + expressions: _col0 (type: decimal(4,2)), _col4 (type: decimal(4,0)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 577 Data size: 64680 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 6758 Data size: 1190783 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 577 Data size: 64680 Basic stats: COMPLETE Column stats: NONE table: input format: 
org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -93,117 +141,123 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 - FROM decimal_mapjoin l - JOIN decimal_mapjoin r ON l.cint = r.cint - WHERE l.cint = 6981 +PREHOOK: query: -- SORT_QUERY_RESULTS + +select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_mapjoin +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 #### A masked pattern was here #### -POSTHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 - FROM decimal_mapjoin l - JOIN decimal_mapjoin r ON l.cint = r.cint - WHERE l.cint = 6981 +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_mapjoin +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 #### A masked pattern was here #### -6981 6981 -515.621072973 -617.5607769230769 -6981 6981 -515.621072973 -617.5607769230769 -6981 6981 -515.621072973 -617.5607769230769 -6981 6981 -515.621072973 -617.5607769230769 -6981 6981 -515.621072973 6984454.211097692 -6981 6981 -515.621072973 6984454.211097692 -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL -6981 6981 5831542.269248378 -617.5607769230769 -6981 6981 5831542.269248378 -617.5607769230769 -6981 6981 5831542.269248378 6984454.211097692 -6981 6981 5831542.269248378 NULL -6981 6981 5831542.269248378 NULL -6981 6981 5831542.269248378 NULL -6981 6981 5831542.269248378 NULL -6981 6981 
5831542.269248378 NULL -6981 6981 5831542.269248378 NULL -6981 6981 5831542.269248378 NULL -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL -617.5607769230769 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL 6984454.211097692 -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL -6981 6981 NULL NULL +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +45 45 +45 45 +45 
45 +45 45 +45 45 +6 6 +6 6 +6 6 +6 6 +6 6 +6 6 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +70 70 +70 70 +70 70 +70 70 +70 70 +70 70 +70 70 +79 79 +79 79 +79 79 +79 79 +79 79 +79 79 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 diff --git ql/src/test/results/clientpositive/vector_inner_join.q.out ql/src/test/results/clientpositive/vector_inner_join.q.out new file mode 100644 index 0000000..3e8d2f4 --- /dev/null +++ ql/src/test/results/clientpositive/vector_inner_join.q.out @@ -0,0 +1,799 @@ +PREHOOK: query: CREATE TABLE orc_table_1a(a INT) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_table_1a +POSTHOOK: query: CREATE TABLE orc_table_1a(a INT) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_table_1a +PREHOOK: query: CREATE TABLE orc_table_2a(c INT) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_table_2a +POSTHOOK: query: CREATE TABLE orc_table_2a(c INT) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_table_2a +PREHOOK: query: insert into table orc_table_1a values(1),(1), (2),(3) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@orc_table_1a +POSTHOOK: query: insert into table orc_table_1a values(1),(1), (2),(3) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: default@orc_table_1a +POSTHOOK: Lineage: orc_table_1a.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] 
+PREHOOK: query: insert into table orc_table_2a values(0),(2), (3),(null),(4) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@orc_table_2a +POSTHOOK: query: insert into table orc_table_2a values(0),(2), (3),(null),(4) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@orc_table_2a +POSTHOOK: Lineage: orc_table_2a.c EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: explain +select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-4 is a root stage + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-4 + Map Reduce Local Work + Alias -> Map Local Tables: + t1 + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + t1 + TableScan + alias: t1 + Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 c (type: int) + 1 a (type: int) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 5 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (c > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 c (type: int) + 1 a (type: int) + outputColumnNames: _col4 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col4 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data 
size: 3 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1a +PREHOOK: Input: default@orc_table_2a +#### A masked pattern was here #### +POSTHOOK: query: select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1a +POSTHOOK: Input: default@orc_table_2a +#### A masked pattern was here #### +3 +PREHOOK: query: explain +select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-4 is a root stage + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-4 + Map Reduce Local Work + Alias -> Map Local Tables: + $hdt$_1:t1 + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + $hdt$_1:t1 + TableScan + alias: t1 + Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: a is not null (type: boolean) + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: a (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Group By 
Operator + keys: _col0 (type: int) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 5 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (c > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: c (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Semi Join 0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1a +PREHOOK: Input: default@orc_table_2a +#### A masked pattern was here #### +POSTHOOK: query: select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1a +POSTHOOK: Input: default@orc_table_2a +#### A masked pattern was here #### +3 +PREHOOK: query: CREATE TABLE orc_table_1b(v1 STRING, a INT) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: 
database:default +PREHOOK: Output: default@orc_table_1b +POSTHOOK: query: CREATE TABLE orc_table_1b(v1 STRING, a INT) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_table_1b +PREHOOK: query: CREATE TABLE orc_table_2b(c INT, v2 STRING) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_table_2b +POSTHOOK: query: CREATE TABLE orc_table_2b(c INT, v2 STRING) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_table_2b +PREHOOK: query: insert into table orc_table_1b values("one", 1),("one", 1), ("two", 2),("three", 3) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__3 +PREHOOK: Output: default@orc_table_1b +POSTHOOK: query: insert into table orc_table_1b values("one", 1),("one", 1), ("two", 2),("three", 3) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__3 +POSTHOOK: Output: default@orc_table_1b +POSTHOOK: Lineage: orc_table_1b.a EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +POSTHOOK: Lineage: orc_table_1b.v1 SIMPLE [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: insert into table orc_table_2b values(0, "ZERO"),(2, "TWO"), (3, "THREE"),(null, ""),(4, "FOUR") +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__4 +PREHOOK: Output: default@orc_table_2b +POSTHOOK: query: insert into table orc_table_2b values(0, "ZERO"),(2, "TWO"), (3, "THREE"),(null, ""),(4, "FOUR") +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__4 +POSTHOOK: Output: default@orc_table_2b +POSTHOOK: Lineage: orc_table_2b.c EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: orc_table_2b.v2 SIMPLE 
[(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: explain +select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-4 is a root stage + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-4 + Map Reduce Local Work + Alias -> Map Local Tables: + t1 + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + t1 + TableScan + alias: t1 + Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 c (type: int) + 1 a (type: int) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (c > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 c (type: int) + 1 a (type: int) + outputColumnNames: _col5, _col6 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col5 (type: string), _col6 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1b +PREHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +POSTHOOK: query: select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1b +POSTHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +three 3 +PREHOOK: query: explain +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-4 is a root stage + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-4 + Map Reduce Local Work + Alias -> Map Local Tables: + t1 + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + t1 + TableScan + alias: t1 + Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 c (type: int) + 1 a (type: int) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (c > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 c (type: int) + 1 a (type: int) + 
outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col5 (type: string), _col6 (type: int), _col0 (type: int), _col1 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1b +PREHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +POSTHOOK: query: select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1b +POSTHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +three 3 3 THREE +PREHOOK: query: explain +select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-4 is a root stage + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-4 + Map Reduce Local Work + Alias -> Map Local Tables: + t1 + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + t1 + TableScan + alias: t1 + 
Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 c (type: int) + 1 a (type: int) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (c > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 c (type: int) + 1 a (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col5 (type: string), (_col6 * 2) (type: int), (_col0 * 5) (type: int), _col1 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1b +PREHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +POSTHOOK: query: select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1b +POSTHOOK: 
Input: default@orc_table_2b +#### A masked pattern was here #### +three 6 15 THREE +PREHOOK: query: explain +select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-4 is a root stage + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-4 + Map Reduce Local Work + Alias -> Map Local Tables: + t1 + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + t1 + TableScan + alias: t1 + Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 c (type: int) + 1 a (type: int) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (c > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 c (type: int) + 1 a (type: int) + outputColumnNames: _col0, _col1, _col5 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col5 (type: string), _col1 (type: string), _col0 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1b +PREHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +POSTHOOK: query: select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1b +POSTHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +three THREE 3 +PREHOOK: query: explain +select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-4 is a root stage + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-4 + Map Reduce Local Work + Alias -> Map Local Tables: + t1 + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + t1 + TableScan + alias: t1 + Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 c (type: int) + 1 a (type: int) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (c > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 c (type: int) + 1 a (type: int) 
+ outputColumnNames: _col1, _col5, _col6 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col6 (type: int), _col5 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1b +PREHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +POSTHOOK: query: select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1b +POSTHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +3 three THREE +PREHOOK: query: explain +select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-4 is a root stage + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-4 + Map Reduce Local Work + Alias -> Map Local Tables: + t1 + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + t1 + TableScan + alias: t1 + Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE 
Column stats: NONE + Filter Operator + predicate: (a > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 a (type: int) + 1 c (type: int) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (c > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 a (type: int) + 1 c (type: int) + outputColumnNames: _col0, _col5, _col6 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col6 (type: string), _col5 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1b +PREHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +POSTHOOK: query: select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1b +POSTHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +three THREE 3 +PREHOOK: query: explain +select t1.a, t1.v1, 
t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.a, t1.v1, t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-4 is a root stage + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-4 + Map Reduce Local Work + Alias -> Map Local Tables: + t1 + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + t1 + TableScan + alias: t1 + Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 a (type: int) + 1 c (type: int) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (c > 2) (type: boolean) + Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 a (type: int) + 1 c (type: int) + outputColumnNames: _col0, _col1, _col6 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col1 (type: int), _col0 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + Execution mode: vectorized + + Stage: Stage-0 + Fetch 
Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t1.a, t1.v1, t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1b +PREHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +POSTHOOK: query: select t1.a, t1.v1, t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1b +POSTHOOK: Input: default@orc_table_2b +#### A masked pattern was here #### +3 three THREE diff --git ql/src/test/results/clientpositive/vector_outer_join0.q.out ql/src/test/results/clientpositive/vector_outer_join0.q.out new file mode 100644 index 0000000..886caa0 --- /dev/null +++ ql/src/test/results/clientpositive/vector_outer_join0.q.out @@ -0,0 +1,230 @@ +PREHOOK: query: CREATE TABLE orc_table_1(v1 STRING, a INT) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_table_1 +POSTHOOK: query: CREATE TABLE orc_table_1(v1 STRING, a INT) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_table_1 +PREHOOK: query: CREATE TABLE orc_table_2(c INT, v2 STRING) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_table_2 +POSTHOOK: query: CREATE TABLE orc_table_2(c INT, v2 STRING) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_table_2 +PREHOOK: query: insert into table orc_table_1 values ("", null),("one", 1),("one", 1),("two", 2),("three", 3),("", null) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@orc_table_1 +POSTHOOK: query: insert into table orc_table_1 values ("", null),("one", 1),("one", 1),("two", 2),("three", 3),("", null) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: 
default@orc_table_1 +POSTHOOK: Lineage: orc_table_1.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +POSTHOOK: Lineage: orc_table_1.v1 SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: insert into table orc_table_2 values (0, "ZERO"),(2, "TWO"), (3, "THREE"),(null, ""),(4, "FOUR"),(null, "") +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@orc_table_2 +POSTHOOK: query: insert into table orc_table_2 values (0, "ZERO"),(2, "TWO"), (3, "THREE"),(null, ""),(4, "FOUR"),(null, "") +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@orc_table_2 +POSTHOOK: Lineage: orc_table_2.c EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: orc_table_2.v2 SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: select * from orc_table_1 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1 +#### A masked pattern was here #### +POSTHOOK: query: select * from orc_table_1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1 +#### A masked pattern was here #### + NULL + NULL +one 1 +one 1 +three 3 +two 2 +PREHOOK: query: select * from orc_table_2 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_2 +#### A masked pattern was here #### +POSTHOOK: query: select * from orc_table_2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_2 +#### A masked pattern was here #### +0 ZERO +2 TWO +3 THREE +4 FOUR +NULL +NULL +PREHOOK: query: explain +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a 
= t2.c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-4 is a root stage + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-4 + Map Reduce Local Work + Alias -> Map Local Tables: + t2 + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + t2 + TableScan + alias: t2 + Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 a (type: int) + 1 c (type: int) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: t1 + Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 a (type: int) + 1 c (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: int), _col5 (type: int), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1 +PREHOOK: Input: default@orc_table_2 +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 
on t1.a = t2.c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1 +POSTHOOK: Input: default@orc_table_2 +#### A masked pattern was here #### + NULL NULL NULL + NULL NULL NULL +one 1 NULL NULL +one 1 NULL NULL +three 3 3 THREE +two 2 2 TWO +PREHOOK: query: explain +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c +PREHOOK: type: QUERY +POSTHOOK: query: explain +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-4 is a root stage + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-4 + Map Reduce Local Work + Alias -> Map Local Tables: + t1 + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + t1 + TableScan + alias: t1 + Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 a (type: int) + 1 c (type: int) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: t2 + Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Right Outer Join0 to 1 + keys: + 0 a (type: int) + 1 c (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: int), _col5 (type: int), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce 
Local Work + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_table_1 +PREHOOK: Input: default@orc_table_2 +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_table_1 +POSTHOOK: Input: default@orc_table_2 +#### A masked pattern was here #### +NULL NULL 0 ZERO +NULL NULL 4 FOUR +NULL NULL NULL +NULL NULL NULL +three 3 3 THREE +two 2 2 TWO diff --git ql/src/test/results/clientpositive/vector_outer_join1.q.out ql/src/test/results/clientpositive/vector_outer_join1.q.out new file mode 100644 index 0000000..53e37ce --- /dev/null +++ ql/src/test/results/clientpositive/vector_outer_join1.q.out @@ -0,0 +1,534 @@ +PREHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc1a +POSTHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc1a +PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc2a +POSTHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where 
cint is null and ctinyint is not null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc2a +PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc3a +POSTHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc3a +PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc4a +POSTHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc4a +PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from +(select * from (select * from small_alltypesorc1a) sq1 + union all + select * from (select * from small_alltypesorc2a) sq2 + union all + select * from (select * from small_alltypesorc3a) sq3 + union all + select * from (select * from small_alltypesorc4a) sq4) q +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@small_alltypesorc1a +PREHOOK: Input: default@small_alltypesorc2a +PREHOOK: Input: default@small_alltypesorc3a +PREHOOK: Input: default@small_alltypesorc4a +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc_a 
+POSTHOOK: query: create table small_alltypesorc_a stored as orc as select * from +(select * from (select * from small_alltypesorc1a) sq1 + union all + select * from (select * from small_alltypesorc2a) sq2 + union all + select * from (select * from small_alltypesorc3a) sq3 + union all + select * from (select * from small_alltypesorc4a) sq4) q +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@small_alltypesorc1a +POSTHOOK: Input: default@small_alltypesorc2a +POSTHOOK: Input: default@small_alltypesorc3a +POSTHOOK: Input: default@small_alltypesorc4a +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc_a +PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +PREHOOK: Output: default@small_alltypesorc_a +POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +POSTHOOK: Output: default@small_alltypesorc_a +PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +PREHOOK: query: explain +select * +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-4 is a root stage + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-4 + Map Reduce Local Work + Alias -> Map Local Tables: + $hdt$_1:c + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + $hdt$_1:c + TableScan 
+ alias: c + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 _col2 (type: int) + 1 _col2 (type: int) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col2 (type: int) + 1 _col2 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23 + Statistics: Num rows: 16 Data size: 3049 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 16 Data size: 3049 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output 
format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select * +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select * +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +-21 -200 NULL NULL -21.0 -200.0 NULL NULL 1969-12-31 16:00:09.052 1969-12-31 15:59:55.451 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 
16:00:01.546 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +-34 15007 528534767 NULL 
-34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 
1969-12-31 16:00:13.352 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -28 -15813 
528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 
1969-12-31 16:00:15.148 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +3 -200 NULL -1438142492 3.0 -200.0 NULL T8Uakh8tudd1XRG5yKW8Y42H 1969-12-31 16:00:07.648 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +31 -200 NULL -245476731 31.0 -200.0 NULL 3E3BxP 1969-12-31 15:59:54.739 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 
true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +38 -200 NULL -1090414113 38.0 -200.0 NULL 5oQ43l 1969-12-31 16:00:05.478 1969-12-31 15:59:55.451 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +38 -200 NULL 1576772382 38.0 -200.0 NULL X3Ufbt46AUIfHe 1969-12-31 15:59:49.567 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 
+NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true 
NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 
true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL -50 
-13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL 
+NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +PREHOOK: query: explain +select c.ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +PREHOOK: type: QUERY +POSTHOOK: query: explain +select c.ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +POSTHOOK: type: QUERY +STAGE 
DEPENDENCIES: + Stage-4 is a root stage + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-4 + Map Reduce Local Work + Alias -> Map Local Tables: + $hdt$_1:c + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + $hdt$_1:c + TableScan + alias: c + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 _col0 (type: tinyint) + 1 _col0 (type: tinyint) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: tinyint) + 1 _col0 (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 16 Data size: 3049 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 16 Data size: 3049 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select c.ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select 
c.ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +-21 +-28 +-34 +-50 +29 +3 +31 +31 +31 +31 +38 +38 +38 +38 +NULL +NULL +NULL +NULL +NULL +PREHOOK: query: explain +select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +) t1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +) t1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-8 is a root stage + Stage-3 depends on stages: Stage-8 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-8 + Map Reduce Local Work + Alias -> Map Local Tables: + $hdt$_0:$hdt$_1:c + Fetch Operator + limit: -1 + $hdt$_0:$hdt$_2:c + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + $hdt$_0:$hdt$_1:c + TableScan + alias: c + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 _col1 (type: int) + 1 _col0 (type: int) + $hdt$_0:$hdt$_2:c + TableScan + alias: c + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 _col0 (type: tinyint) + 1 _col0 (type: tinyint) + + Stage: Stage-3 + Map 
Reduce + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), cint (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col1 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 16 Data size: 3049 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: tinyint) + 1 _col0 (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 17 Data size: 3353 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(), sum(_col0) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint), _col1 (type: bigint) + Local Work: + Map Reduce Local Work + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0), sum(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint +from small_alltypesorc_a 
c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.ctinyint = c.ctinyint +) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +118 -14 diff --git ql/src/test/results/clientpositive/vector_outer_join2.q.out ql/src/test/results/clientpositive/vector_outer_join2.q.out new file mode 100644 index 0000000..a7b14e1 --- /dev/null +++ ql/src/test/results/clientpositive/vector_outer_join2.q.out @@ -0,0 +1,232 @@ +PREHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc1a +POSTHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc1a +PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc2a +POSTHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc 
+POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc2a +PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc3a +POSTHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc3a +PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc4a +POSTHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc4a +PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from +(select * from (select * from small_alltypesorc1a) sq1 + union all + select * from (select * from small_alltypesorc2a) sq2 + union all + select * from (select * from small_alltypesorc3a) sq3 + union all + select * from (select * from small_alltypesorc4a) sq4) q +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@small_alltypesorc1a +PREHOOK: Input: default@small_alltypesorc2a +PREHOOK: Input: default@small_alltypesorc3a +PREHOOK: Input: default@small_alltypesorc4a +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc_a +POSTHOOK: query: create table small_alltypesorc_a stored as orc as select * from +(select * from (select * from 
small_alltypesorc1a) sq1 + union all + select * from (select * from small_alltypesorc2a) sq2 + union all + select * from (select * from small_alltypesorc3a) sq3 + union all + select * from (select * from small_alltypesorc4a) sq4) q +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@small_alltypesorc1a +POSTHOOK: Input: default@small_alltypesorc2a +POSTHOOK: Input: default@small_alltypesorc3a +POSTHOOK: Input: default@small_alltypesorc4a +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc_a +PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +PREHOOK: Output: default@small_alltypesorc_a +POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +POSTHOOK: Output: default@small_alltypesorc_a +PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +PREHOOK: query: explain +select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cbigint = c.cbigint +) t1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cbigint = c.cbigint +) t1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-8 is a root stage + Stage-3 depends on stages: Stage-8 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: 
Stage-8 + Map Reduce Local Work + Alias -> Map Local Tables: + $hdt$_0:$hdt$_1:c + Fetch Operator + limit: -1 + $hdt$_0:$hdt$_2:c + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + $hdt$_0:$hdt$_1:c + TableScan + alias: c + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + $hdt$_0:$hdt$_2:c + TableScan + alias: c + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cbigint (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 _col1 (type: bigint) + 1 _col0 (type: bigint) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int), cbigint (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col1 + Statistics: Num rows: 16 Data size: 3049 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col1 (type: bigint) + 1 _col0 (type: bigint) + outputColumnNames: _col1 + Statistics: Num rows: 17 Data size: 3353 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col1 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 17 Data size: 3353 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(), sum(_col0) + mode: hash + outputColumnNames: 
_col0, _col1 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint), _col1 (type: bigint) + Local Work: + Map Reduce Local Work + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0), sum(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cbigint = c.cbigint +) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cbigint = c.cbigint +) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +105 -1197260954 diff --git ql/src/test/results/clientpositive/vector_outer_join3.q.out ql/src/test/results/clientpositive/vector_outer_join3.q.out new file mode 100644 index 0000000..a8ee829 --- /dev/null +++ 
ql/src/test/results/clientpositive/vector_outer_join3.q.out @@ -0,0 +1,509 @@ +PREHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc1a +POSTHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc1a +PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc2a +POSTHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc2a +PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc3a +POSTHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc3a +PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null limit 5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: 
Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc4a +POSTHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null limit 5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc4a +PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from +(select * from (select * from small_alltypesorc1a) sq1 + union all + select * from (select * from small_alltypesorc2a) sq2 + union all + select * from (select * from small_alltypesorc3a) sq3 + union all + select * from (select * from small_alltypesorc4a) sq4) q +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@small_alltypesorc1a +PREHOOK: Input: default@small_alltypesorc2a +PREHOOK: Input: default@small_alltypesorc3a +PREHOOK: Input: default@small_alltypesorc4a +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc_a +POSTHOOK: query: create table small_alltypesorc_a stored as orc as select * from +(select * from (select * from small_alltypesorc1a) sq1 + union all + select * from (select * from small_alltypesorc2a) sq2 + union all + select * from (select * from small_alltypesorc3a) sq3 + union all + select * from (select * from small_alltypesorc4a) sq4) q +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@small_alltypesorc1a +POSTHOOK: Input: default@small_alltypesorc2a +POSTHOOK: Input: default@small_alltypesorc3a +POSTHOOK: Input: default@small_alltypesorc4a +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc_a +PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +PREHOOK: Output: default@small_alltypesorc_a +POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS +POSTHOOK: type: QUERY +POSTHOOK: Input: 
default@small_alltypesorc_a +POSTHOOK: Output: default@small_alltypesorc_a +PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +PREHOOK: query: explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-8 is a root stage + Stage-3 depends on stages: Stage-8 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-8 + Map Reduce Local Work + Alias -> Map Local Tables: + $hdt$_0:$hdt$_1:c + Fetch Operator + limit: -1 + $hdt$_0:$hdt$_2:c + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + $hdt$_0:$hdt$_1:c + TableScan + alias: c + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + $hdt$_0:$hdt$_2:c + TableScan + alias: c + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cstring1 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 
_col1 (type: string) + 1 _col0 (type: string) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int), cstring1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col1 + Statistics: Num rows: 16 Data size: 3049 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col1 (type: string) + 1 _col0 (type: string) + Statistics: Num rows: 17 Data size: 3353 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Local Work: + Map Reduce Local Work + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = 
c.cint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cint = c.cint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +1005 +PREHOOK: query: explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-8 is a root stage + Stage-3 depends on stages: Stage-8 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-8 + Map Reduce Local Work + Alias -> Map Local Tables: + $hdt$_0:$hdt$_1:c + Fetch Operator + limit: -1 + $hdt$_0:$hdt$_2:c + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + $hdt$_0:$hdt$_1:c + TableScan + alias: c + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cstring2 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 _col1 (type: string) + 1 _col0 (type: string) + $hdt$_0:$hdt$_2:c + TableScan + alias: c + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cstring1 (type: string) + outputColumnNames: 
_col0 + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 _col0 (type: string) + 1 _col0 (type: string) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cstring1 (type: string), cstring2 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col1 (type: string) + 1 _col0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 16 Data size: 3049 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: string) + 1 _col0 (type: string) + Statistics: Num rows: 17 Data size: 3353 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Local Work: + Map Reduce Local Work + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- 
SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 +) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +105 +PREHOOK: query: explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 and hd.cint = c.cint +) t1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 and hd.cint = c.cint +) t1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-8 is a root stage + Stage-3 depends on stages: Stage-8 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-8 + Map Reduce Local Work + Alias -> Map Local Tables: + $hdt$_0:$hdt$_1:c + Fetch Operator + limit: -1 + $hdt$_0:$hdt$_2:c + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + $hdt$_0:$hdt$_1:c + TableScan + alias: c + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cbigint (type: bigint), cstring2 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + 
HashTable Sink Operator + keys: + 0 _col3 (type: string), _col1 (type: bigint) + 1 _col1 (type: string), _col0 (type: bigint) + $hdt$_0:$hdt$_2:c + TableScan + alias: c + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int), cstring1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 _col2 (type: string), _col0 (type: int) + 1 _col1 (type: string), _col0 (type: int) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int), cbigint (type: bigint), cstring1 (type: string), cstring2 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 15 Data size: 2772 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col3 (type: string), _col1 (type: bigint) + 1 _col1 (type: string), _col0 (type: bigint) + outputColumnNames: _col0, _col2 + Statistics: Num rows: 16 Data size: 3049 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col2 (type: string), _col0 (type: int) + 1 _col1 (type: string), _col0 (type: int) + Statistics: Num rows: 17 Data size: 3353 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Local Work: + Map Reduce Local Work + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + 
outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 and hd.cint = c.cint +) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.cstring1 +from small_alltypesorc_a c +left outer join small_alltypesorc_a cd + on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint +left outer join small_alltypesorc_a hd + on hd.cstring1 = c.cstring1 and hd.cint = c.cint +) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_a +#### A masked pattern was here #### +105 diff --git ql/src/test/results/clientpositive/vector_outer_join4.q.out ql/src/test/results/clientpositive/vector_outer_join4.q.out new file mode 100644 index 0000000..c07b1d6 --- /dev/null +++ ql/src/test/results/clientpositive/vector_outer_join4.q.out @@ -0,0 +1,857 @@ +PREHOOK: query: create table small_alltypesorc1b as select * from alltypesorc where cint is not null and ctinyint is not null limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc1b +POSTHOOK: query: create table small_alltypesorc1b as select * from alltypesorc where cint is not null 
and ctinyint is not null limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc1b +PREHOOK: query: create table small_alltypesorc2b as select * from alltypesorc where cint is null and ctinyint is not null limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc2b +POSTHOOK: query: create table small_alltypesorc2b as select * from alltypesorc where cint is null and ctinyint is not null limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc2b +PREHOOK: query: create table small_alltypesorc3b as select * from alltypesorc where cint is not null and ctinyint is null limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc3b +POSTHOOK: query: create table small_alltypesorc3b as select * from alltypesorc where cint is not null and ctinyint is null limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc3b +PREHOOK: query: create table small_alltypesorc4b as select * from alltypesorc where cint is null and ctinyint is null limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc4b +POSTHOOK: query: create table small_alltypesorc4b as select * from alltypesorc where cint is null and ctinyint is null limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc4b +PREHOOK: query: create table small_alltypesorc_b stored as orc as 
select * from +(select * from (select * from small_alltypesorc1b) sq1 + union all + select * from (select * from small_alltypesorc2b) sq2 + union all + select * from (select * from small_alltypesorc3b) sq3 + union all + select * from (select * from small_alltypesorc4b) sq4) q +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@small_alltypesorc1b +PREHOOK: Input: default@small_alltypesorc2b +PREHOOK: Input: default@small_alltypesorc3b +PREHOOK: Input: default@small_alltypesorc4b +PREHOOK: Output: database:default +PREHOOK: Output: default@small_alltypesorc_b +POSTHOOK: query: create table small_alltypesorc_b stored as orc as select * from +(select * from (select * from small_alltypesorc1b) sq1 + union all + select * from (select * from small_alltypesorc2b) sq2 + union all + select * from (select * from small_alltypesorc3b) sq3 + union all + select * from (select * from small_alltypesorc4b) sq4) q +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@small_alltypesorc1b +POSTHOOK: Input: default@small_alltypesorc2b +POSTHOOK: Input: default@small_alltypesorc3b +POSTHOOK: Input: default@small_alltypesorc4b +POSTHOOK: Output: database:default +POSTHOOK: Output: default@small_alltypesorc_b +PREHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_b +PREHOOK: Output: default@small_alltypesorc_b +POSTHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_b +POSTHOOK: Output: default@small_alltypesorc_b +PREHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS FOR COLUMNS +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +POSTHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS FOR COLUMNS +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +PREHOOK: query: explain 
+select * +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-4 is a root stage + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-4 + Map Reduce Local Work + Alias -> Map Local Tables: + $hdt$_1:c + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + $hdt$_1:c + TableScan + alias: c + Statistics: Num rows: 30 Data size: 5670 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 + Statistics: Num rows: 30 Data size: 5670 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 _col2 (type: int) + 1 _col2 (type: int) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 30 Data size: 5670 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 + Statistics: Num rows: 30 Data size: 5670 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + 
condition map: + Left Outer Join0 to 1 + keys: + 0 _col2 (type: int) + 1 _col2 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23 + Statistics: Num rows: 33 Data size: 6237 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 33 Data size: 6237 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select * +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select * +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +-10 -200 NULL -1818374653 -10.0 -200.0 NULL uFavNs7g58qrfyCH681d 1969-12-31 16:00:03.248 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 
16:00:01.546 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p 
NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p 
NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +-21 -200 NULL NULL -21.0 -200.0 NULL NULL 1969-12-31 16:00:09.052 1969-12-31 15:59:55.451 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true 
NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -7314 528534767 NULL NULL -7314.0 
cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +-30 -200 NULL -815881183 -30.0 -200.0 NULL B0B5kG3OIl6C 1969-12-31 15:59:44.842 1969-12-31 15:59:55.451 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL -50 -13326 528534767 NULL -50.0 
-13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +-34 15007 528534767 NULL -34.0 15007.0 
cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +-39 -200 NULL 944477914 -39.0 -200.0 NULL T8brJ213nd7rhW8XdnB1 1969-12-31 16:00:00.958 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 
16:00:08.875 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p 
NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL 11254 
528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +-63 -200 NULL 1927856372 -63.0 -200.0 NULL v6mk2b7oX 1969-12-31 16:00:06.852 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 27 
-7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 
1969-12-31 16:00:12.752 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 
16:00:04.963 1969-12-31 15:59:56.474 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 
16:00:03.756 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true 
NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL 61 -15549 
528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL 13889 528534767 NULL NULL 13889.0 
cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +3 -200 NULL -1438142492 3.0 -200.0 NULL T8Uakh8tudd1XRG5yKW8Y42H 1969-12-31 16:00:07.648 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +31 -200 NULL -245476731 31.0 -200.0 NULL 3E3BxP 1969-12-31 15:59:54.739 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 
1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -6147 
528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true 
NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +31 4963 528534767 NULL 31.0 4963.0 
cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +38 -200 NULL -1090414113 38.0 -200.0 NULL 5oQ43l 1969-12-31 16:00:05.478 1969-12-31 15:59:55.451 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +38 -200 NULL 1576772382 38.0 -200.0 NULL X3Ufbt46AUIfHe 1969-12-31 15:59:49.567 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p 
NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 
cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +8 -200 NULL -1236645308 8.0 -200.0 NULL M28wJxOvunO3EImapE7OApQ 1969-12-31 15:59:46.007 1969-12-31 15:59:55.451 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 
15:59:55.407 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL 
NULL 1969-12-31 16:00:13.839 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL 
NULL 1969-12-31 16:00:03.756 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -4213 528534767 NULL NULL -4213.0 
cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 
1969-12-31 16:00:07.209 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -4016 528534767 NULL NULL 
-4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 
16:00:13.589 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -3012 528534767 NULL 
NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true 
NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -11534 528534767 NULL 
NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL -11 -15431 
528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 
cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL NULL 
3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p 
NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL NULL 
13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 
1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true 
NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 
16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL NULL -7314 528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 
true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL -50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 
15:59:56.474 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL 61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -11534 528534767 NULL NULL -11534.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:55.407 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4016 528534767 NULL NULL -4016.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:07.209 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -6147 528534767 NULL NULL -6147.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.839 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7314 
528534767 NULL NULL -7314.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:06.913 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL -7680 528534767 NULL NULL -7680.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:11.525 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 11254 528534767 NULL NULL 11254.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.151 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 13889 528534767 NULL NULL 13889.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:00.423 true NULL +NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL NULL 3321 528534767 NULL NULL 3321.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:52.967 true NULL +PREHOOK: query: explain +select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +PREHOOK: type: QUERY +POSTHOOK: query: explain +select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-4 is a root stage + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-4 + Map Reduce Local Work + Alias -> Map Local Tables: + $hdt$_1:c + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + $hdt$_1:c + TableScan + alias: c + Statistics: Num rows: 30 Data size: 5670 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 30 Data size: 5670 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 _col0 (type: tinyint) + 1 _col0 (type: tinyint) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 30 Data size: 5670 Basic 
stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 30 Data size: 5670 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: tinyint) + 1 _col0 (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 33 Data size: 6237 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 33 Data size: 6237 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +-10 +-11 +-21 +-28 +-30 +-34 +-39 +-50 +-63 +16 +27 +29 +3 +31 +31 +31 +31 +31 +31 +31 +31 +31 +38 +38 +38 +38 +61 +8 +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +PREHOOK: query: explain +select count(*) from (select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +) t1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) from (select c.ctinyint +from small_alltypesorc_b c +left outer join 
small_alltypesorc_b cd + on cd.cint = c.cint +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +) t1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-8 is a root stage + Stage-3 depends on stages: Stage-8 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-8 + Map Reduce Local Work + Alias -> Map Local Tables: + $hdt$_0:$hdt$_1:c + Fetch Operator + limit: -1 + $hdt$_0:$hdt$_2:c + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + $hdt$_0:$hdt$_1:c + TableScan + alias: c + Statistics: Num rows: 30 Data size: 5670 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 30 Data size: 5670 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 _col1 (type: int) + 1 _col0 (type: int) + $hdt$_0:$hdt$_2:c + TableScan + alias: c + Statistics: Num rows: 30 Data size: 5670 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: _col0 + Statistics: Num rows: 30 Data size: 5670 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + keys: + 0 _col0 (type: tinyint) + 1 _col0 (type: tinyint) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 30 Data size: 5670 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), cint (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 30 Data size: 5670 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col1 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 33 Data size: 6237 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: tinyint) + 1 _col0 (type: tinyint) + Statistics: Num rows: 36 Data size: 6860 Basic stats: COMPLETE 
Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Local Work: + Map Reduce Local Work + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select count(*) from (select c.ctinyint +from small_alltypesorc_b c +left outer join small_alltypesorc_b cd + on cd.cint = c.cint +left outer join small_alltypesorc_b hd + on hd.ctinyint = c.ctinyint +) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@small_alltypesorc_b +#### A masked pattern was here #### +494 diff --git serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java index 2488a44..acb51f9 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java +++ 
serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java @@ -64,6 +64,14 @@ public WriteBuffers(int wbSize, long maxSize) { nextBufferToWrite(); } + public int readVInt() { + return (int) readVLong(defaultReadPos); + } + + public int readVInt(Position readPos) { + return (int) readVLong(readPos); + } + public long readVLong() { return readVLong(defaultReadPos); } @@ -358,6 +366,19 @@ public long getReadPoint(Position readPos) { return (readPos.bufferIndex * (long)wbSize) + readPos.offset; } + public void getByteSegmentRefToCurrent(ByteSegmentRef byteSegmentRef, int length, + Position readPos) { + + byteSegmentRef.reset((readPos.bufferIndex * (long)wbSize) + readPos.offset, length); + if (length > 0) { + populateValue(byteSegmentRef); + } + } + + public void writeVInt(int value) { + LazyBinaryUtils.writeVInt(this, value); + } + public void writeVLong(long value) { LazyBinaryUtils.writeVLong(this, value); } @@ -425,7 +446,9 @@ public int getLength() { } public ByteBuffer copy() { byte[] copy = new byte[length]; - System.arraycopy(bytes, (int)offset, copy, 0, length); + if (length > 0) { + System.arraycopy(bytes, (int)offset, copy, 0, length); + } return ByteBuffer.wrap(copy); } private byte[] bytes = null; diff --git serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableSerializeWrite.java serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableSerializeWrite.java index 6b2675c..285ae10 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableSerializeWrite.java +++ serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableSerializeWrite.java @@ -85,7 +85,7 @@ private BinarySortableSerializeWrite() { } /* - * Set the buffer that will receive the serialized data. + * Set the buffer that will receive the serialized data. The output buffer will be reset. 
*/ @Override public void set(Output output) { @@ -95,6 +95,15 @@ public void set(Output output) { } /* + * Set the buffer that will receive the serialized data. The output buffer will NOT be reset. + */ + @Override + public void setAppend(Output output) { + this.output = output; + index = -1; + } + + /* * Reset the previously supplied buffer that will receive the serialized data. */ @Override diff --git serde/src/java/org/apache/hadoop/hive/serde2/fast/SerializeWrite.java serde/src/java/org/apache/hadoop/hive/serde2/fast/SerializeWrite.java index 8e586fb..e6fb8b6 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/fast/SerializeWrite.java +++ serde/src/java/org/apache/hadoop/hive/serde2/fast/SerializeWrite.java @@ -31,19 +31,25 @@ /* * Directly serialize with the caller writing field-by-field a serialization format. - * + * * The caller is responsible for calling the write method for the right type of each field * (or calling writeNull if the field is a NULL). - * + * */ public interface SerializeWrite { /* - * Set the buffer that will receive the serialized data. + * Set the buffer that will receive the serialized data. The output buffer will be reset. */ void set(Output output); /* + * Set the buffer that will receive the serialized data. The output buffer will NOT be reset. + */ + void setAppend(Output output); + + + /* * Reset the previously supplied buffer that will receive the serialized data. */ void reset(); diff --git serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java index 0771b12..77838a1 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java +++ serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java @@ -106,7 +106,7 @@ private LazySimpleSerializeWrite() { } /* - * Set the buffer that will receive the serialized data. 
+ * Set the buffer that will receive the serialized data. The output buffer will be reset. */ @Override public void set(Output output) { @@ -116,6 +116,15 @@ public void set(Output output) { } /* + * Set the buffer that will receive the serialized data. The output buffer will NOT be reset. + */ + @Override + public void setAppend(Output output) { + this.output = output; + index = 0; + } + + /* * Reset the previously supplied buffer that will receive the serialized data. */ @Override @@ -123,7 +132,7 @@ public void reset() { output.reset(); index = 0; } - + /* * General Pattern: * diff --git serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/fast/LazyBinarySerializeWrite.java serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/fast/LazyBinarySerializeWrite.java index 8cb2741..e0d9c0a 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/fast/LazyBinarySerializeWrite.java +++ serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/fast/LazyBinarySerializeWrite.java @@ -71,7 +71,7 @@ private LazyBinarySerializeWrite() { } /* - * Set the buffer that will receive the serialized data. + * Set the buffer that will receive the serialized data. The output buffer will be reset. */ @Override public void set(Output output) { @@ -83,6 +83,17 @@ public void set(Output output) { } /* + * Set the buffer that will receive the serialized data. The output buffer will NOT be reset. + */ + @Override + public void setAppend(Output output) { + this.output = output; + fieldIndex = 0; + nullByte = 0; + nullOffset = output.getLength(); + } + + /* * Reset the previously supplied buffer that will receive the serialized data. */ @Override @@ -92,7 +103,7 @@ public void reset() { nullByte = 0; nullOffset = 0; } - + /* * General Pattern: *