Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java	(revision 1437268)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java	(working copy)
@@ -1286,7 +1286,7 @@
       return null;
     }
     return new Partition(mpart.getValues(), dbName, tblName, mpart.getCreateTime(),
-        mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd(), true),
+        mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd(), false),
         mpart.getParameters());
   }
Index: metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java	(revision 1437268)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java	(working copy)
@@ -157,7 +157,9 @@
    *          hadoop config
    * @param schema
    *          the properties to use to instantiate the deserializer
-   * @return the Deserializer
+   * @return
+   *          the deserializer, instantiated from the deserializer class name stored in
+   *          the passed-in properties and initialized with the schema also stored there
    * @exception MetaException
    *              if any problems instantiating the Deserializer
    *
@@ -189,7 +191,10 @@
    *          - hadoop config
    * @param table
    *          the table
-   * @return the Deserializer
+   * @return
+   *          the deserializer, instantiated from the deserializer class name stored in
+   *          the storage descriptor of the passed-in table and initialized with the
+   *          table's schema
    * @exception MetaException
    *              if any problems instantiating the Deserializer
    *
@@ -204,7 +209,7 @@
     }
     try {
       Deserializer deserializer = SerDeUtils.lookupDeserializer(lib);
-      deserializer.initialize(conf, MetaStoreUtils.getSchema(table));
+      deserializer.initialize(conf, MetaStoreUtils.getTableMetadata(table));
       return deserializer;
     } catch (RuntimeException e) {
       throw e;
@@ -226,7 +231,10 @@
    * @param part
    *          the partition
    * @param table the table
-   * @return the Deserializer
+   * @return
+   *          the deserializer, instantiated from the deserializer class name stored in
+   *          the storage descriptor of the passed-in partition and initialized with the
+   *          partition's schema
   * @exception MetaException
   *              if any problems instantiating the Deserializer
   *
@@ -237,7 +245,7 @@
     String lib = part.getSd().getSerdeInfo().getSerializationLib();
     try {
       Deserializer deserializer = SerDeUtils.lookupDeserializer(lib);
-      deserializer.initialize(conf, MetaStoreUtils.getSchema(part, table));
+      deserializer.initialize(conf, MetaStoreUtils.getPartitionMetadata(part, table));
       return deserializer;
     } catch (RuntimeException e) {
       throw e;
@@ -493,12 +501,21 @@
     return ddl.toString();
   }
 
-  public static Properties getSchema(
+  public static Properties getTableMetadata(
       org.apache.hadoop.hive.metastore.api.Table table) {
     return MetaStoreUtils.getSchema(table.getSd(), table.getSd(), table
         .getParameters(), table.getDbName(), table.getTableName(),
         table.getPartitionKeys());
   }
 
+  public static Properties getPartitionMetadata(
+      org.apache.hadoop.hive.metastore.api.Partition partition,
+      org.apache.hadoop.hive.metastore.api.Table table) {
+    return MetaStoreUtils
+        .getSchema(partition.getSd(), partition.getSd(), partition
+            .getParameters(), table.getDbName(), table.getTableName(),
+            table.getPartitionKeys());
+  }
+
   public static Properties getSchema(
       org.apache.hadoop.hive.metastore.api.Partition part,
       org.apache.hadoop.hive.metastore.api.Table table) {
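
To make the table/partition split above concrete, here is a minimal sketch using plain
java.util.Properties rather than the metastore Thrift API (the class and property values
are illustrative, not the real method). getPartitionMetadata builds the deserializer's
properties from the partition's own storage descriptor, so a partition written before an
ALTER TABLE ... SET SERDE keeps its original serialization.lib:

    import java.util.Properties;

    public class PartitionMetadataSketch {
      // Table-level properties, roughly what getTableMetadata would derive.
      static Properties tableLevel() {
        Properties p = new Properties();
        p.setProperty("serialization.lib",
            "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe");
        p.setProperty("columns", "key,value");
        return p;
      }

      // Partition-level properties: the partition's storage descriptor wins,
      // with table-level values as the fallback.
      static Properties partitionLevel(Properties tableProps) {
        Properties p = new Properties(tableProps);
        p.setProperty("serialization.lib",
            "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe");
        return p;
      }

      public static void main(String[] args) {
        Properties part = partitionLevel(tableLevel());
        System.out.println(part.getProperty("serialization.lib")); // partition's own serde
        System.out.println(part.getProperty("columns"));           // inherited from the table
      }
    }
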
+ */
+
+package org.apache.hadoop.hive.common;
+
+public class ObjectPair<F, S> {
+  private F first;
+  private S second;
+
+  public ObjectPair() {}
+
+  public ObjectPair(F first, S second) {
+    this.first = first;
+    this.second = second;
+  }
+
+  public F getFirst() {
+    return first;
+  }
+
+  public void setFirst(F first) {
+    this.first = first;
+  }
+
+  public S getSecond() {
+    return second;
+  }
+
+  public void setSecond(S second) {
+    this.second = second;
+  }
+}
Index: serde/src/java/org/apache/hadoop/hive/serde2/NullStructSerDe.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/NullStructSerDe.java	(revision 1437268)
+++ serde/src/java/org/apache/hadoop/hive/serde2/NullStructSerDe.java	(working copy)
@@ -56,32 +56,11 @@
     return null;
   }
 
+  private static ObjectInspector nullStructOI = new NullStructSerDeObjectInspector();
+
   @Override
   public ObjectInspector getObjectInspector() throws SerDeException {
-    return new StructObjectInspector() {
-      public String getTypeName() {
-        return "null";
-      }
-      public Category getCategory() {
-        return Category.PRIMITIVE;
-      }
-      @Override
-      public StructField getStructFieldRef(String fieldName) {
-        return null;
-      }
-      @Override
-      public List<? extends StructField> getAllStructFieldRefs() {
-        return new ArrayList<StructField>();
-      }
-      @Override
-      public Object getStructFieldData(Object data, StructField fieldRef) {
-        return null;
-      }
-      @Override
-      public List<Object> getStructFieldsDataAsList(Object data) {
-        return new ArrayList<Object>();
-      }
-    };
+    return nullStructOI;
   }
 
@@ -103,4 +82,38 @@
     return NullWritable.get();
   }
+
+  /**
+   * An object inspector for the null struct serde.
+   */
+  public static class NullStructSerDeObjectInspector extends StructObjectInspector {
+    public String getTypeName() {
+      return "null";
+    }
+
+    public Category getCategory() {
+      return Category.PRIMITIVE;
+    }
+
+    @Override
+    public StructField getStructFieldRef(String fieldName) {
+      return null;
+    }
+
+    @Override
+    public List<? extends StructField> getAllStructFieldRefs() {
+      return new ArrayList<StructField>();
+    }
+
+    @Override
+    public Object getStructFieldData(Object data, StructField fieldRef) {
+      return null;
+    }
+
+    @Override
+    public List<Object> getStructFieldsDataAsList(Object data) {
+      return new ArrayList<Object>();
+    }
+  }
+
 }
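
A quick usage sketch for the new ObjectPair helper added above (values are made up):

    import org.apache.hadoop.hive.common.ObjectPair;

    public class ObjectPairSketch {
      public static void main(String[] args) {
        // Pair a partition spec with a file count (example values only).
        ObjectPair<String, Integer> p =
            new ObjectPair<String, Integer>("ds=2008-04-08", 4);
        System.out.println(p.getFirst() + " -> " + p.getSecond());
        p.setSecond(5); // fields are mutable via the setters
        System.out.println(p.getFirst() + " -> " + p.getSecond());
      }
    }
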
Index: serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/SettableStructObjectInspector.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/SettableStructObjectInspector.java	(revision 1437268)
+++ serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/SettableStructObjectInspector.java	(working copy)
@@ -34,4 +34,9 @@
    */
   public abstract Object setStructFieldData(Object struct, StructField field,
       Object fieldValue);
+
+  @Override
+  public boolean isSettable() {
+    return true;
+  }
 }
Index: serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/StructObjectInspector.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/StructObjectInspector.java	(revision 1437268)
+++ serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/StructObjectInspector.java	(working copy)
@@ -47,6 +47,10 @@
    */
   public abstract List<Object> getStructFieldsDataAsList(Object data);
 
+  public boolean isSettable() {
+    return false;
+  }
+
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
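
With the two changes above, any OI that extends SettableStructObjectInspector reports
isSettable() == true, while every other StructObjectInspector inherits the false default.
A small sketch, assuming this patch is applied and the hive-serde classes are on the
classpath:

    import java.util.Arrays;

    import org.apache.hadoop.hive.serde2.NullStructSerDe;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
    import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
    import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

    public class IsSettableSketch {
      public static void main(String[] args) {
        // StandardStructObjectInspector extends SettableStructObjectInspector.
        StructObjectInspector standard = ObjectInspectorFactory
            .getStandardStructObjectInspector(
                Arrays.asList("key", "value"),
                Arrays.<ObjectInspector>asList(
                    PrimitiveObjectInspectorFactory.javaStringObjectInspector,
                    PrimitiveObjectInspectorFactory.javaStringObjectInspector));
        System.out.println(standard.isSettable()); // true

        // The null-struct OI extends StructObjectInspector directly,
        // so it inherits the new default.
        StructObjectInspector nullOI = new NullStructSerDe.NullStructSerDeObjectInspector();
        System.out.println(nullOI.isSettable()); // false
      }
    }
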
Index: serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java	(revision 1437268)
+++ serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java	(working copy)
@@ -24,9 +24,9 @@
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaStringObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorConverter;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableBooleanObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableBigDecimalObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableBinaryObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableBooleanObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableByteObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableDoubleObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableFloatObjectInspector;
@@ -34,8 +34,8 @@
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableLongObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableShortObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableTimestampObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.VoidObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableStringObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.VoidObjectInspector;
 
 /**
  * ObjectInspectorConverters.
@@ -60,6 +60,64 @@
     }
   }
 
+  private static Converter getConverter(PrimitiveObjectInspector inputOI,
+      PrimitiveObjectInspector outputOI) {
+    switch (outputOI.getPrimitiveCategory()) {
+    case BOOLEAN:
+      return new PrimitiveObjectInspectorConverter.BooleanConverter(
+          inputOI,
+          (SettableBooleanObjectInspector) outputOI);
+    case BYTE:
+      return new PrimitiveObjectInspectorConverter.ByteConverter(
+          inputOI,
+          (SettableByteObjectInspector) outputOI);
+    case SHORT:
+      return new PrimitiveObjectInspectorConverter.ShortConverter(
+          inputOI,
+          (SettableShortObjectInspector) outputOI);
+    case INT:
+      return new PrimitiveObjectInspectorConverter.IntConverter(
+          inputOI,
+          (SettableIntObjectInspector) outputOI);
+    case LONG:
+      return new PrimitiveObjectInspectorConverter.LongConverter(
+          inputOI,
+          (SettableLongObjectInspector) outputOI);
+    case FLOAT:
+      return new PrimitiveObjectInspectorConverter.FloatConverter(
+          inputOI,
+          (SettableFloatObjectInspector) outputOI);
+    case DOUBLE:
+      return new PrimitiveObjectInspectorConverter.DoubleConverter(
+          inputOI,
+          (SettableDoubleObjectInspector) outputOI);
+    case STRING:
+      if (outputOI instanceof WritableStringObjectInspector) {
+        return new PrimitiveObjectInspectorConverter.TextConverter(
+            inputOI);
+      } else if (outputOI instanceof JavaStringObjectInspector) {
+        return new PrimitiveObjectInspectorConverter.StringConverter(
+            inputOI);
+      }
+    case TIMESTAMP:
+      return new PrimitiveObjectInspectorConverter.TimestampConverter(
+          inputOI,
+          (SettableTimestampObjectInspector) outputOI);
+    case BINARY:
+      return new PrimitiveObjectInspectorConverter.BinaryConverter(
+          inputOI,
+          (SettableBinaryObjectInspector) outputOI);
+    case DECIMAL:
+      return new PrimitiveObjectInspectorConverter.BigDecimalConverter(
+          (PrimitiveObjectInspector) inputOI,
+          (SettableBigDecimalObjectInspector) outputOI);
+    default:
+      throw new RuntimeException("Hive internal error: conversion of "
+          + inputOI.getTypeName() + " to " + outputOI.getTypeName()
+          + " not supported yet.");
+    }
+  }
+
   /**
    * Returns a converter that converts objects from one OI to another OI. The
    * returned (converted) object belongs to this converter, so that it can be
@@ -69,66 +127,12 @@
       ObjectInspector outputOI) {
     // If the inputOI is the same as the outputOI, just return an
     // IdentityConverter.
-    if (inputOI == outputOI) {
+    if (inputOI.equals(outputOI)) {
       return new IdentityConverter();
     }
     switch (outputOI.getCategory()) {
     case PRIMITIVE:
-      switch (((PrimitiveObjectInspector) outputOI).getPrimitiveCategory()) {
-      case BOOLEAN:
-        return new PrimitiveObjectInspectorConverter.BooleanConverter(
-            (PrimitiveObjectInspector) inputOI,
-            (SettableBooleanObjectInspector) outputOI);
-      case BYTE:
-        return new PrimitiveObjectInspectorConverter.ByteConverter(
-            (PrimitiveObjectInspector) inputOI,
-            (SettableByteObjectInspector) outputOI);
-      case SHORT:
-        return new PrimitiveObjectInspectorConverter.ShortConverter(
-            (PrimitiveObjectInspector) inputOI,
-            (SettableShortObjectInspector) outputOI);
-      case INT:
-        return new PrimitiveObjectInspectorConverter.IntConverter(
-            (PrimitiveObjectInspector) inputOI,
-            (SettableIntObjectInspector) outputOI);
-      case LONG:
-        return new PrimitiveObjectInspectorConverter.LongConverter(
-            (PrimitiveObjectInspector) inputOI,
-            (SettableLongObjectInspector) outputOI);
-      case FLOAT:
-        return new PrimitiveObjectInspectorConverter.FloatConverter(
-            (PrimitiveObjectInspector) inputOI,
-            (SettableFloatObjectInspector) outputOI);
-      case DOUBLE:
-        return new PrimitiveObjectInspectorConverter.DoubleConverter(
-            (PrimitiveObjectInspector) inputOI,
-            (SettableDoubleObjectInspector) outputOI);
-      case STRING:
-        if (outputOI instanceof WritableStringObjectInspector) {
-          return new PrimitiveObjectInspectorConverter.TextConverter(
-              (PrimitiveObjectInspector) inputOI);
-        } else if (outputOI instanceof JavaStringObjectInspector) {
-          return new PrimitiveObjectInspectorConverter.StringConverter(
-              (PrimitiveObjectInspector) inputOI);
-        }
-      case TIMESTAMP:
-        return new PrimitiveObjectInspectorConverter.TimestampConverter(
-            (PrimitiveObjectInspector) inputOI,
-            (SettableTimestampObjectInspector) outputOI);
-      case BINARY:
-        return new PrimitiveObjectInspectorConverter.BinaryConverter(
-            (PrimitiveObjectInspector) inputOI,
-            (SettableBinaryObjectInspector) outputOI);
-      case DECIMAL:
-        return new PrimitiveObjectInspectorConverter.BigDecimalConverter(
-            (PrimitiveObjectInspector) inputOI,
-            (SettableBigDecimalObjectInspector) outputOI);
-
-      default:
-        throw new RuntimeException("Hive internal error: conversion of "
-            + inputOI.getTypeName() + " to " + outputOI.getTypeName()
-            + " not supported yet.");
-      }
+      return getConverter((PrimitiveObjectInspector) inputOI, (PrimitiveObjectInspector) outputOI);
     case STRUCT:
       return new StructConverter(inputOI,
           (SettableStructObjectInspector) outputOI);
@@ -145,6 +149,50 @@
     }
   }
 
+  public static ObjectInspector getConvertedOI(
+      ObjectInspector inputOI,
+      ObjectInspector outputOI) {
+    // If the inputOI is the same as the outputOI, just return it
+    if (inputOI.equals(outputOI)) {
+      return outputOI;
+    }
+    switch (outputOI.getCategory()) {
+    case PRIMITIVE:
+      return outputOI;
+    case STRUCT:
+      StructObjectInspector structOutputOI = (StructObjectInspector) outputOI;
+      if (structOutputOI.isSettable()) {
+        return outputOI;
+      }
+      else {
+        // create a standard settable struct object inspector
+        List<? extends StructField> listFields = structOutputOI.getAllStructFieldRefs();
+        List<String> structFieldNames = new ArrayList<String>(listFields.size());
+        List<ObjectInspector> structFieldObjectInspectors = new ArrayList<ObjectInspector>(
+            listFields.size());
+
+        for (StructField listField : listFields) {
+          structFieldNames.add(listField.getFieldName());
+          structFieldObjectInspectors.add(listField.getFieldObjectInspector());
+        }
+
+        StandardStructObjectInspector structStandardOutputOI = ObjectInspectorFactory
+            .getStandardStructObjectInspector(
+                structFieldNames,
+                structFieldObjectInspectors);
+        return structStandardOutputOI;
+      }
+    case LIST:
+      return outputOI;
+    case MAP:
+      return outputOI;
+    default:
+      throw new RuntimeException("Hive internal error: conversion of "
+          + inputOI.getTypeName() + " to " + outputOI.getTypeName()
+          + " not supported yet.");
+    }
+  }
+
   /**
    * A converter class for List.
    */
@@ -226,10 +274,11 @@
       this.outputOI = outputOI;
       inputFields = this.inputOI.getAllStructFieldRefs();
       outputFields = outputOI.getAllStructFieldRefs();
-      assert (inputFields.size() == outputFields.size());
-      fieldConverters = new ArrayList<Converter>(inputFields.size());
-      for (int f = 0; f < inputFields.size(); f++) {
+      // If the output has some extra fields, set them to NULL.
+      int minFields = Math.min(inputFields.size(), outputFields.size());
+      fieldConverters = new ArrayList<Converter>(minFields);
+      for (int f = 0; f < minFields; f++) {
         fieldConverters.add(getConverter(inputFields.get(f)
             .getFieldObjectInspector(), outputFields.get(f)
             .getFieldObjectInspector()));
@@ -248,15 +297,19 @@
         return null;
       }
 
+      int minFields = Math.min(inputFields.size(), outputFields.size());
       // Convert the fields
-      for (int f = 0; f < inputFields.size(); f++) {
-        Object inputFieldValue = inputOI.getStructFieldData(input, inputFields
-            .get(f));
-        Object outputFieldValue = fieldConverters.get(f).convert(
-            inputFieldValue);
-        outputOI.setStructFieldData(output, outputFields.get(f),
-            outputFieldValue);
+      for (int f = 0; f < minFields; f++) {
+        Object inputFieldValue = inputOI.getStructFieldData(input, inputFields.get(f));
+        Object outputFieldValue = fieldConverters.get(f).convert(inputFieldValue);
+        outputOI.setStructFieldData(output, outputFields.get(f), outputFieldValue);
       }
+
+      // set the extra fields to null
+      for (int f = minFields; f < outputFields.size(); f++) {
+        outputOI.setStructFieldData(output, outputFields.get(f), null);
+      }
+
       return output;
     }
   }
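
The net effect of the converter changes: getConvertedOI wraps a non-settable output
struct OI in a standard settable one, and StructConverter now tolerates an input struct
with fewer fields than the output, filling the extra output fields with null instead of
failing the old size assert. A sketch of the null-fill behavior, assuming this patch is
applied:

    import java.util.Arrays;

    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
    import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
    import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

    public class StructConverterSketch {
      public static void main(String[] args) {
        ObjectInspector stringOI = PrimitiveObjectInspectorFactory.javaStringObjectInspector;

        // One-column input rows, two-column output schema.
        StructObjectInspector inputOI = ObjectInspectorFactory
            .getStandardStructObjectInspector(Arrays.asList("key"),
                Arrays.asList(stringOI));
        StructObjectInspector outputOI = ObjectInspectorFactory
            .getStandardStructObjectInspector(Arrays.asList("key", "value"),
                Arrays.asList(stringOI, stringOI));

        ObjectInspectorConverters.Converter c =
            ObjectInspectorConverters.getConverter(inputOI, outputOI);
        Object row = c.convert(Arrays.<Object>asList("238"));

        // The missing second field comes out as null.
        System.out.println(outputOI.getStructFieldsDataAsList(row)); // [238, null]
      }
    }

This is what allows a partition whose stored schema has fewer columns than the current
table schema to be read through the converted OI, as the new partition_wise_fileformat
tests below exercise.
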
Index: ql/src/test/results/clientpositive/bucketmapjoin5.q.out
===================================================================
--- ql/src/test/results/clientpositive/bucketmapjoin5.q.out	(revision 1437268)
+++ ql/src/test/results/clientpositive/bucketmapjoin5.q.out	(working copy)
@@ -253,7 +253,6 @@
 #### A masked pattern was here ####
               name default.srcbucket_mapjoin_part
               numFiles 4
-              numPartitions 2
               numRows 0
               partition_columns ds
               rawDataSize 0
@@ -301,7 +300,6 @@
 #### A masked pattern was here ####
               name default.srcbucket_mapjoin_part
               numFiles 4
-              numPartitions 2
               numRows 0
               partition_columns ds
               rawDataSize 0
@@ -813,7 +811,6 @@
 #### A masked pattern was here ####
               name default.srcbucket_mapjoin_part_2
               numFiles 2
-              numPartitions 2
               numRows 0
               partition_columns ds
               rawDataSize 0
@@ -861,7 +858,6 @@
 #### A masked pattern was here ####
               name default.srcbucket_mapjoin_part_2
               numFiles 2
-              numPartitions 2
               numRows 0
               partition_columns ds
               rawDataSize 0
Index: ql/src/test/results/clientpositive/pcr.q.out
===================================================================
--- ql/src/test/results/clientpositive/pcr.q.out	(revision
1437268) +++ ql/src/test/results/clientpositive/pcr.q.out (working copy) @@ -123,7 +123,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -169,7 +168,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -323,7 +321,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -369,7 +366,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -415,7 +411,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -613,7 +608,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -659,7 +653,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -821,7 +814,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -867,7 +859,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -1031,7 +1022,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -1077,7 +1067,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -1123,7 +1112,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -1298,7 +1286,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -1344,7 +1331,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -1390,7 +1376,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -1569,7 +1554,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -1615,7 +1599,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -1756,7 +1739,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -1802,7 +1784,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -1983,7 +1964,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -2029,7 +2009,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -2075,7 +2054,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -2290,7 +2268,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 
- numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -2336,7 +2313,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -2495,7 +2471,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -2773,7 +2748,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -2819,7 +2793,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 3 numRows 20 partition_columns ds rawDataSize 160 @@ -3112,7 +3085,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 4 numRows 20 partition_columns ds rawDataSize 160 @@ -3158,7 +3130,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 4 numRows 20 partition_columns ds rawDataSize 160 @@ -3204,7 +3175,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 4 numRows 20 partition_columns ds rawDataSize 160 @@ -3250,7 +3220,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 4 numRows 20 partition_columns ds rawDataSize 160 @@ -3454,7 +3423,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 4 numRows 20 partition_columns ds rawDataSize 160 @@ -3500,7 +3468,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 4 numRows 20 partition_columns ds rawDataSize 160 @@ -3546,7 +3513,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 4 numRows 20 partition_columns ds rawDataSize 160 @@ -3822,7 +3788,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 4 numRows 20 partition_columns ds rawDataSize 160 @@ -4382,7 +4347,6 @@ #### A masked pattern was here #### name default.pcr_t1 numFiles 1 - numPartitions 4 numRows 20 partition_columns ds rawDataSize 160 @@ -4938,7 +4902,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -5111,7 +5074,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -5158,7 +5120,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -5337,7 +5298,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -5384,7 +5344,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/load_dyn_part8.q.out =================================================================== --- ql/src/test/results/clientpositive/load_dyn_part8.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/load_dyn_part8.q.out (working copy) @@ -157,7 +157,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -204,7 +203,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -251,7 +249,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - 
numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -298,7 +295,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/groupby_sort_6.q.out =================================================================== --- ql/src/test/results/clientpositive/groupby_sort_6.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/groupby_sort_6.q.out (working copy) @@ -421,7 +421,6 @@ #### A masked pattern was here #### name default.t1 numFiles 1 - numPartitions 1 numRows 0 partition_columns ds rawDataSize 0 Index: ql/src/test/results/clientpositive/filter_join_breaktask.q.out =================================================================== --- ql/src/test/results/clientpositive/filter_join_breaktask.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/filter_join_breaktask.q.out (working copy) @@ -101,7 +101,6 @@ #### A masked pattern was here #### name default.filter_join_breaktask numFiles 1 - numPartitions 1 numRows 25 partition_columns ds rawDataSize 211 @@ -231,7 +230,6 @@ #### A masked pattern was here #### name default.filter_join_breaktask numFiles 1 - numPartitions 1 numRows 25 partition_columns ds rawDataSize 211 Index: ql/src/test/results/clientpositive/sort_merge_join_desc_5.q.out =================================================================== --- ql/src/test/results/clientpositive/sort_merge_join_desc_5.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/sort_merge_join_desc_5.q.out (working copy) @@ -120,7 +120,6 @@ partition values: part 1 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 1 bucket_field_name key columns key,value @@ -128,7 +127,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 1 - numPartitions 1 numRows 500 partition_columns part rawDataSize 5312 Index: ql/src/test/results/clientpositive/input_part9.q.out =================================================================== --- ql/src/test/results/clientpositive/input_part9.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/input_part9.q.out (working copy) @@ -71,7 +71,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -118,7 +117,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/partition_wise_fileformat14.q.out =================================================================== --- ql/src/test/results/clientpositive/partition_wise_fileformat14.q.out (revision 0) +++ ql/src/test/results/clientpositive/partition_wise_fileformat14.q.out (working copy) @@ -0,0 +1,234 @@ +PREHOOK: query: CREATE TABLE tbl1(key int, value string) PARTITIONED by (ds string) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS rcfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE tbl1(key int, value string) PARTITIONED by (ds string) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@tbl1 +PREHOOK: query: CREATE TABLE tbl2(key int, value string) PARTITIONED by (ds string) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS rcfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: CREATE TABLE tbl2(key int, value string) PARTITIONED by (ds string) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED 
AS rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@tbl2 +PREHOOK: query: alter table tbl1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' +PREHOOK: type: ALTERTABLE_SERIALIZER +PREHOOK: Input: default@tbl1 +PREHOOK: Output: default@tbl1 +POSTHOOK: query: alter table tbl1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' +POSTHOOK: type: ALTERTABLE_SERIALIZER +POSTHOOK: Input: default@tbl1 +POSTHOOK: Output: default@tbl1 +PREHOOK: query: alter table tbl2 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' +PREHOOK: type: ALTERTABLE_SERIALIZER +PREHOOK: Input: default@tbl2 +PREHOOK: Output: default@tbl2 +POSTHOOK: query: alter table tbl2 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' +POSTHOOK: type: ALTERTABLE_SERIALIZER +POSTHOOK: Input: default@tbl2 +POSTHOOK: Output: default@tbl2 +PREHOOK: query: insert overwrite table tbl1 partition (ds='1') select * from src where key < 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@tbl1@ds=1 +POSTHOOK: query: insert overwrite table tbl1 partition (ds='1') select * from src where key < 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@tbl1@ds=1 +POSTHOOK: Lineage: tbl1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table tbl2 partition (ds='1') select * from src where key < 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@tbl2@ds=1 +POSTHOOK: query: insert overwrite table tbl2 partition (ds='1') select * from src where key < 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@tbl2@ds=1 +POSTHOOK: Lineage: tbl1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table tbl1 change key key int +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@tbl1 +PREHOOK: Output: default@tbl1 +POSTHOOK: query: alter table tbl1 change key key int +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: default@tbl1 +POSTHOOK: Output: default@tbl1 +POSTHOOK: Lineage: tbl1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table tbl1 partition (ds='2') select * from src where key < 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@tbl1@ds=2 +POSTHOOK: query: insert overwrite table tbl1 partition (ds='2') select * from src where key < 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@tbl1@ds=2 +POSTHOOK: 
Lineage: tbl1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table tbl1 change key key string +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@tbl1 +PREHOOK: Output: default@tbl1 +POSTHOOK: query: alter table tbl1 change key key string +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: default@tbl1 +POSTHOOK: Output: default@tbl1 +POSTHOOK: Lineage: tbl1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: -- The subquery itself is being map-joined. Multiple partitions of tbl1 with different schemas are being read for tbl2 +select /*+mapjoin(subq1)*/ count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl1@ds=1 +PREHOOK: Input: default@tbl1@ds=2 +PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl2@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: -- The subquery itself is being map-joined. 
Multiple partitions of tbl1 with different schemas are being read for tbl2 +select /*+mapjoin(subq1)*/ count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl1@ds=1 +POSTHOOK: Input: default@tbl1@ds=2 +POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl2@ds=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: tbl1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +40 +PREHOOK: query: -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should +-- be converted to a bucketized mapside join. Multiple partitions of tbl1 with different schemas are being read for each +-- bucket of tbl2 +select /*+mapjoin(subq1)*/ count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl1@ds=1 +PREHOOK: Input: default@tbl1@ds=2 +PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl2@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should +-- be converted to a bucketized mapside join. Multiple partitions of tbl1 with different schemas are being read for each +-- bucket of tbl2 +select /*+mapjoin(subq1)*/ count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl1@ds=1 +POSTHOOK: Input: default@tbl1@ds=2 +POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl2@ds=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: tbl1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +40 +PREHOOK: query: -- The subquery itself is being map-joined. 
Since the sub-query only contains selects and filters, it should +-- be converted to a sort-merge join. Multiple partitions of tbl1 with different schemas are being read for a +-- given file of tbl2 +select /*+mapjoin(subq1)*/ count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl1@ds=1 +PREHOOK: Input: default@tbl1@ds=2 +PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl2@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should +-- be converted to a sort-merge join. Multiple partitions of tbl1 with different schemas are being read for a +-- given file of tbl2 +select /*+mapjoin(subq1)*/ count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl1@ds=1 +POSTHOOK: Input: default@tbl1@ds=2 +POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl2@ds=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: tbl1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +40 +PREHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized map-side +-- join should be performed. Multiple partitions of tbl1 with different schemas are being read for tbl2 +select /*+mapjoin(subq1)*/ count(*) from + (select a.key+1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 + join + (select a.key+1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 + on subq1.key = subq2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl1@ds=1 +PREHOOK: Input: default@tbl1@ds=2 +PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl2@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized map-side +-- join should be performed. 
Multiple partitions of tbl1 with different schemas are being read for tbl2 +select /*+mapjoin(subq1)*/ count(*) from + (select a.key+1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 + join + (select a.key+1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 + on subq1.key = subq2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl1@ds=1 +POSTHOOK: Input: default@tbl1@ds=2 +POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl2@ds=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: tbl1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +44 Index: ql/src/test/results/clientpositive/join9.q.out =================================================================== --- ql/src/test/results/clientpositive/join9.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/join9.q.out (working copy) @@ -120,7 +120,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/rand_partitionpruner3.q.out =================================================================== --- ql/src/test/results/clientpositive/rand_partitionpruner3.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/rand_partitionpruner3.q.out (working copy) @@ -73,7 +73,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -201,7 +200,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketcontext_4.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketcontext_4.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/bucketcontext_4.q.out (working copy) @@ -162,7 +162,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -170,7 +169,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 1 numRows 0 partition_columns ds rawDataSize 0 @@ -357,7 +355,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -365,7 +362,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 1 numRows 0 partition_columns ds rawDataSize 0 Index: ql/src/test/results/clientpositive/merge3.q.out =================================================================== --- ql/src/test/results/clientpositive/merge3.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/merge3.q.out (working copy) @@ -2441,7 +2441,6 @@ #### A masked pattern was here #### name default.merge_src_part numFiles 2 - 
numPartitions 2 numRows 1000 partition_columns ds rawDataSize 10624 @@ -2487,7 +2486,6 @@ #### A masked pattern was here #### name default.merge_src_part numFiles 2 - numPartitions 2 numRows 1000 partition_columns ds rawDataSize 10624 @@ -4873,7 +4871,6 @@ #### A masked pattern was here #### name default.merge_src_part numFiles 2 - numPartitions 2 numRows 1000 partition_columns ds rawDataSize 10624 @@ -4919,7 +4916,6 @@ #### A masked pattern was here #### name default.merge_src_part numFiles 2 - numPartitions 2 numRows 1000 partition_columns ds rawDataSize 10624 Index: ql/src/test/results/clientpositive/bucketmapjoin9.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketmapjoin9.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/bucketmapjoin9.q.out (working copy) @@ -151,7 +151,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 1 numRows 0 partition_columns part rawDataSize 0 @@ -417,7 +416,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 1 numRows 0 partition_columns part rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketmapjoin13.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketmapjoin13.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/bucketmapjoin13.q.out (working copy) @@ -179,7 +179,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 2 numRows 500 partition_columns part rawDataSize 5312 @@ -227,7 +226,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 2 numRows 500 partition_columns part rawDataSize 5312 @@ -478,7 +476,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 2 numRows 500 partition_columns part rawDataSize 5312 @@ -740,7 +737,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 2 numRows 500 partition_columns part rawDataSize 5312 @@ -1004,7 +1000,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 2 numRows 500 partition_columns part rawDataSize 5312 Index: ql/src/test/results/clientpositive/columnstats_partlvl.q.out =================================================================== --- ql/src/test/results/clientpositive/columnstats_partlvl.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/columnstats_partlvl.q.out (working copy) @@ -145,7 +145,6 @@ #### A masked pattern was here #### name default.employee_part numFiles 1 - numPartitions 2 numRows 0 partition_columns employeesalary rawDataSize 0 @@ -352,7 +351,6 @@ #### A masked pattern was here #### name default.employee_part numFiles 1 - numPartitions 2 numRows 0 partition_columns employeesalary rawDataSize 0 Index: ql/src/test/results/clientpositive/sample8.q.out =================================================================== --- ql/src/test/results/clientpositive/sample8.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/sample8.q.out (working copy) @@ -85,7 +85,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -132,7 +131,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr 
rawDataSize 0 @@ -179,7 +177,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -226,7 +223,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/router_join_ppr.q.out =================================================================== --- ql/src/test/results/clientpositive/router_join_ppr.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/router_join_ppr.q.out (working copy) @@ -136,7 +136,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -183,7 +182,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -230,7 +228,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -277,7 +274,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -546,7 +542,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -593,7 +588,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -853,7 +847,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -900,7 +893,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -1160,7 +1152,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -1207,7 +1198,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -1254,7 +1244,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -1301,7 +1290,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/input42.q.out =================================================================== --- ql/src/test/results/clientpositive/input42.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/input42.q.out (working copy) @@ -66,7 +66,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -113,7 +112,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -1258,7 +1256,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -1305,7 +1302,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -1828,7 +1824,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -1875,7 +1870,6 @@ #### A masked pattern was 
here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/partition_wise_fileformat13.q.out =================================================================== --- ql/src/test/results/clientpositive/partition_wise_fileformat13.q.out (revision 0) +++ ql/src/test/results/clientpositive/partition_wise_fileformat13.q.out (working copy) @@ -0,0 +1,128 @@ +PREHOOK: query: -- This tests that the schema can be changed for partitioned tables for binary serde data for joins +create table T1(key string, value string) partitioned by (dt string) stored as rcfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: -- This tests that the schema can be changed for partitioned tables for binary serde data for joins +create table T1(key string, value string) partitioned by (dt string) stored as rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@T1 +PREHOOK: query: alter table T1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' +PREHOOK: type: ALTERTABLE_SERIALIZER +PREHOOK: Input: default@t1 +PREHOOK: Output: default@t1 +POSTHOOK: query: alter table T1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' +POSTHOOK: type: ALTERTABLE_SERIALIZER +POSTHOOK: Input: default@t1 +POSTHOOK: Output: default@t1 +PREHOOK: query: insert overwrite table T1 partition (dt='1') select * from src where key = 238 or key = 97 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@t1@dt=1 +POSTHOOK: query: insert overwrite table T1 partition (dt='1') select * from src where key = 238 or key = 97 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@t1@dt=1 +POSTHOOK: Lineage: t1 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table T1 change key key int +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@t1 +PREHOOK: Output: default@t1 +POSTHOOK: query: alter table T1 change key key int +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: default@t1 +POSTHOOK: Output: default@t1 +POSTHOOK: Lineage: t1 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table T1 partition (dt='2') select * from src where key = 238 or key = 97 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@t1@dt=2 +POSTHOOK: query: insert overwrite table T1 partition (dt='2') select * from src where key = 238 or key = 97 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@t1@dt=2 +POSTHOOK: Lineage: t1 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table T1 change key key string +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@t1 +PREHOOK: Output: default@t1 +POSTHOOK: query: alter table T1 change key 
key string +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: default@t1 +POSTHOOK: Output: default@t1 +POSTHOOK: Lineage: t1 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create table T2(key string, value string) partitioned by (dt string) stored as rcfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table T2(key string, value string) partitioned by (dt string) stored as rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@T2 +POSTHOOK: Lineage: t1 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table T2 partition (dt='1') select * from src where key = 238 or key = 97 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@t2@dt=1 +POSTHOOK: query: insert overwrite table T2 partition (dt='1') select * from src where key = 238 or key = 97 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@t2@dt=1 +POSTHOOK: Lineage: t1 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: t2 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t2 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select /* + MAPJOIN(a) */ count(*) FROM T1 a JOIN T2 b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=1 +PREHOOK: Input: default@t1@dt=2 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select /* + MAPJOIN(a) */ count(*) FROM T1 a JOIN T2 b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=1 +POSTHOOK: Input: default@t1@dt=2 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: t1 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: t2 
PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t2 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +16 +PREHOOK: query: select count(*) FROM T1 a JOIN T2 b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=1 +PREHOOK: Input: default@t1@dt=2 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM T1 a JOIN T2 b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=1 +POSTHOOK: Input: default@t1@dt=2 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: t1 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1 PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: t2 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t2 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +16 Index: ql/src/test/results/clientpositive/louter_join_ppr.q.out =================================================================== --- ql/src/test/results/clientpositive/louter_join_ppr.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/louter_join_ppr.q.out (working copy) @@ -134,7 +134,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -181,7 +180,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -441,7 +439,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -488,7 +485,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -535,7 +531,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -582,7 +577,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -853,7 +847,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -900,7 +893,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -947,7 +939,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -994,7 +985,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -1260,7 +1250,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -1307,7 +1296,6 @@ #### A masked pattern was here #### name 
default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketcontext_8.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketcontext_8.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/bucketcontext_8.q.out (working copy) @@ -175,7 +175,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 4 bucket_field_name key columns key,value @@ -183,7 +182,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 4 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -225,7 +223,6 @@ partition values: ds 2008-04-09 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 4 bucket_field_name key columns key,value @@ -233,7 +230,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 4 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -423,7 +419,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 4 bucket_field_name key columns key,value @@ -431,7 +426,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 4 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -473,7 +467,6 @@ partition values: ds 2008-04-09 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 4 bucket_field_name key columns key,value @@ -481,7 +474,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 4 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 Index: ql/src/test/results/clientpositive/rand_partitionpruner2.q.out =================================================================== --- ql/src/test/results/clientpositive/rand_partitionpruner2.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/rand_partitionpruner2.q.out (working copy) @@ -95,7 +95,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -142,7 +141,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketcontext_3.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketcontext_3.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/bucketcontext_3.q.out (working copy) @@ -150,7 +150,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 4 bucket_field_name key columns key,value @@ -158,7 +157,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 4 - numPartitions 1 numRows 0 partition_columns ds rawDataSize 0 @@ -345,7 +343,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 4 bucket_field_name key columns key,value @@ -353,7 +350,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 4 - numPartitions 1 numRows 0 partition_columns ds rawDataSize 0 Index: ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out =================================================================== --- ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out (working copy) @@ -72,7 +72,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr 
rawDataSize 0 @@ -119,7 +118,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketmapjoin8.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketmapjoin8.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/bucketmapjoin8.q.out (working copy) @@ -152,7 +152,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 1 numRows 0 partition_columns part rawDataSize 0 @@ -396,7 +395,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 1 numRows 0 partition_columns part rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketmapjoin12.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketmapjoin12.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/bucketmapjoin12.q.out (working copy) @@ -180,7 +180,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 1 numRows 0 partition_columns part rawDataSize 0 @@ -409,7 +408,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 1 numRows 0 partition_columns part rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketmapjoin3.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketmapjoin3.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/bucketmapjoin3.q.out (working copy) @@ -215,7 +215,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_2 numFiles 2 - numPartitions 1 numRows 0 partition_columns ds rawDataSize 0 @@ -726,7 +725,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part numFiles 4 - numPartitions 1 numRows 0 partition_columns ds rawDataSize 0 Index: ql/src/test/results/clientpositive/smb_mapjoin_12.q.out =================================================================== --- ql/src/test/results/clientpositive/smb_mapjoin_12.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/smb_mapjoin_12.q.out (working copy) @@ -157,7 +157,6 @@ partition values: ds 1 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 16 bucket_field_name key columns key,value @@ -165,7 +164,6 @@ #### A masked pattern was here #### name default.test_table1 numFiles 16 - numPartitions 1 numRows 500 partition_columns ds rawDataSize 5312 @@ -398,7 +396,6 @@ partition values: ds 1 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 16 bucket_field_name key columns key,value @@ -406,7 +403,6 @@ #### A masked pattern was here #### name default.test_table3 numFiles 16 - numPartitions 1 numRows 3084 partition_columns ds rawDataSize 32904 Index: ql/src/test/results/clientpositive/outer_join_ppr.q.out =================================================================== --- ql/src/test/results/clientpositive/outer_join_ppr.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/outer_join_ppr.q.out (working copy) @@ -126,7 +126,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -173,7 +172,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -220,7 +218,6 @@ #### A masked 
pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -267,7 +264,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -528,7 +524,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -575,7 +570,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -622,7 +616,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -669,7 +662,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/partition_wise_fileformat9.q.out =================================================================== --- ql/src/test/results/clientpositive/partition_wise_fileformat9.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/partition_wise_fileformat9.q.out (working copy) @@ -75,3 +75,39 @@ 104 val_104 1 104 val_104 2 104 val_104 2 +PREHOOK: query: select key+key as key, value, dt from partition_test_partitioned where dt is not null order by key, value, dt limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned@dt=2 +#### A masked pattern was here #### +POSTHOOK: query: select key+key as key, value, dt from partition_test_partitioned where dt is not null order by key, value, dt limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned@dt=2 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0.0 val_0 1 +0.0 val_0 1 +0.0 val_0 1 +0.0 val_0 2 +0.0 val_0 2 +0.0 val_0 2 +4.0 val_2 1 +4.0 val_2 2 +8.0 val_4 1 +8.0 val_4 2 +10.0 val_5 1 +10.0 val_5 1 +10.0 val_5 1 +10.0 val_5 2 +10.0 val_5 2 +10.0 val_5 2 +16.0 val_8 1 +16.0 val_8 2 +18.0 val_9 1 +18.0 val_9 2 Index: ql/src/test/results/clientpositive/stats11.q.out =================================================================== --- ql/src/test/results/clientpositive/stats11.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/stats11.q.out (working copy) @@ -909,7 +909,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part numFiles 4 - numPartitions 1 numRows 0 partition_columns ds rawDataSize 0 @@ -1153,7 +1152,6 @@ hdfs directory: true #### A masked pattern was here #### - PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result select /*+mapjoin(a)*/ a.key, a.value, b.value from srcbucket_mapjoin a join srcbucket_mapjoin_part b Index: 
ql/src/test/results/clientpositive/input23.q.out =================================================================== --- ql/src/test/results/clientpositive/input23.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/input23.q.out (working copy) @@ -71,7 +71,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/groupby_ppr.q.out =================================================================== --- ql/src/test/results/clientpositive/groupby_ppr.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/groupby_ppr.q.out (working copy) @@ -70,7 +70,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -117,7 +116,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/input_part7.q.out =================================================================== --- ql/src/test/results/clientpositive/input_part7.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/input_part7.q.out (working copy) @@ -150,7 +150,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -197,7 +196,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/join33.q.out =================================================================== --- ql/src/test/results/clientpositive/join33.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/join33.q.out (working copy) @@ -210,7 +210,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/input_part2.q.out =================================================================== --- ql/src/test/results/clientpositive/input_part2.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/input_part2.q.out (working copy) @@ -167,7 +167,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -214,7 +213,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/partition_wise_fileformat12.q.out =================================================================== --- ql/src/test/results/clientpositive/partition_wise_fileformat12.q.out (revision 0) +++ ql/src/test/results/clientpositive/partition_wise_fileformat12.q.out (working copy) @@ -0,0 +1,216 @@ +PREHOOK: query: -- This tests that the schema can be changed for binary serde data +create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: -- This tests that the schema can be changed for binary serde data +create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@partition_test_partitioned +PREHOOK: query: alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' +PREHOOK: 
type: ALTERTABLE_SERIALIZER +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Output: default@partition_test_partitioned +POSTHOOK: query: alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' +POSTHOOK: type: ALTERTABLE_SERIALIZER +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Output: default@partition_test_partitioned +PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') select * from src where key = 238 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@partition_test_partitioned@dt=1 +POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') select * from src where key = 238 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@partition_test_partitioned@dt=1 +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from partition_test_partitioned where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +238 val_238 1 +238 val_238 1 +PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +476.0 val_238 +476.0 val_238 +PREHOOK: query: alter table partition_test_partitioned change key key int +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Output: default@partition_test_partitioned +POSTHOOK: query: alter table partition_test_partitioned change key key int +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Output: default@partition_test_partitioned +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: 
default@partition_test_partitioned +PREHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +476 val_238 +476 val_238 +PREHOOK: query: select * from partition_test_partitioned where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +238 val_238 1 +238 val_238 1 +PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='2') select * from src where key = 97 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@partition_test_partitioned@dt=2 +POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='2') select * from src where key = 97 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@partition_test_partitioned@dt=2 +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table partition_test_partitioned add columns (value2 string) +PREHOOK: type: ALTERTABLE_ADDCOLS +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Output: default@partition_test_partitioned +POSTHOOK: query: alter table partition_test_partitioned add columns (value2 string) +POSTHOOK: type: ALTERTABLE_ADDCOLS +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Output: default@partition_test_partitioned +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +PREHOOK: 
type: QUERY +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned@dt=2 +#### A masked pattern was here #### +POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned@dt=2 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +476 val_238 +476 val_238 +194 val_97 +194 val_97 +PREHOOK: query: select * from partition_test_partitioned where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned@dt=2 +#### A masked pattern was here #### +POSTHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned@dt=2 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +238 val_238 NULL 1 +238 val_238 NULL 1 +97 val_97 NULL 2 +97 val_97 NULL 2 +PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='3') select key, value, value from src where key = 200 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@partition_test_partitioned@dt=3 +POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='3') select key, value, value from src where key = 200 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@partition_test_partitioned@dt=3 +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: 
partition_test_partitioned PARTITION(dt=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).value2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select key+key, value, value2 from partition_test_partitioned where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned@dt=2 +PREHOOK: Input: default@partition_test_partitioned@dt=3 +#### A masked pattern was here #### +POSTHOOK: query: select key+key, value, value2 from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned@dt=2 +POSTHOOK: Input: default@partition_test_partitioned@dt=3 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).value2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +476 val_238 NULL +476 val_238 NULL +194 val_97 NULL +194 val_97 NULL +400 val_200 val_200 +400 val_200 val_200 +PREHOOK: query: select * from partition_test_partitioned where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned@dt=2 +PREHOOK: Input: default@partition_test_partitioned@dt=3 +#### A masked pattern was here #### +POSTHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned@dt=2 +POSTHOOK: Input: default@partition_test_partitioned@dt=3 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).value 
SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).value2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +238 val_238 NULL 1 +238 val_238 NULL 1 +97 val_97 NULL 2 +97 val_97 NULL 2 +200 val_200 val_200 3 +200 val_200 val_200 3 Index: ql/src/test/results/clientpositive/groupby_map_ppr.q.out =================================================================== --- ql/src/test/results/clientpositive/groupby_map_ppr.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/groupby_map_ppr.q.out (working copy) @@ -87,7 +87,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -134,7 +133,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketcontext_7.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketcontext_7.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/bucketcontext_7.q.out (working copy) @@ -175,7 +175,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -183,7 +182,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -225,7 +223,6 @@ partition values: ds 2008-04-09 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -233,7 +230,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -423,7 +419,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -431,7 +426,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -473,7 +467,6 @@ partition values: ds 2008-04-09 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -481,7 +474,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 Index: ql/src/test/results/clientpositive/combine2_hadoop20.q.out =================================================================== --- ql/src/test/results/clientpositive/combine2_hadoop20.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/combine2_hadoop20.q.out (working copy) @@ -250,7 +250,6 @@ #### A masked pattern was here #### name default.combine2 numFiles 1 - numPartitions 8 numRows 1 partition_columns value rawDataSize 2 @@ -296,7 +295,6 @@ #### A masked pattern was here #### name default.combine2 numFiles 1 - numPartitions 8 numRows 3 partition_columns value rawDataSize 3 @@ -342,7 +340,6 @@ #### A masked pattern was here #### name default.combine2 numFiles 1 - numPartitions 8 numRows 1 partition_columns value rawDataSize 1 @@ -388,7 +385,6 @@ #### A masked pattern was here #### name default.combine2 numFiles 1 - numPartitions 8 numRows 1 partition_columns value rawDataSize 1 @@ -434,7 +430,6 @@ #### A masked pattern was here #### name default.combine2 numFiles 1 - numPartitions 8 numRows 3 partition_columns value rawDataSize 3 @@ -480,7 +475,6 @@ #### A masked pattern was here #### 
name default.combine2 numFiles 1 - numPartitions 8 numRows 1 partition_columns value rawDataSize 1 @@ -526,7 +520,6 @@ #### A masked pattern was here #### name default.combine2 numFiles 1 - numPartitions 8 numRows 1 partition_columns value rawDataSize 1 @@ -572,7 +565,6 @@ #### A masked pattern was here #### name default.combine2 numFiles 1 - numPartitions 8 numRows 1 partition_columns value rawDataSize 2 Index: ql/src/test/results/clientpositive/bucketcontext_2.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketcontext_2.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/bucketcontext_2.q.out (working copy) @@ -150,7 +150,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -158,7 +157,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -200,7 +198,6 @@ partition values: ds 2008-04-09 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -208,7 +205,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -396,7 +392,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -404,7 +399,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -446,7 +440,6 @@ partition values: ds 2008-04-09 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -454,7 +447,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 Index: ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out =================================================================== --- ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out (working copy) @@ -97,7 +97,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -144,7 +143,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketmapjoin7.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketmapjoin7.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/bucketmapjoin7.q.out (working copy) @@ -162,7 +162,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 1 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketmapjoin11.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketmapjoin11.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/bucketmapjoin11.q.out (working copy) @@ -212,7 +212,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 2 numRows 0 partition_columns part rawDataSize 0 @@ -260,7 +259,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 
numFiles 4 - numPartitions 2 numRows 0 partition_columns part rawDataSize 0 @@ -497,7 +495,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 2 numRows 0 partition_columns part rawDataSize 0 @@ -545,7 +542,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 4 - numPartitions 2 numRows 0 partition_columns part rawDataSize 0 Index: ql/src/test/results/clientpositive/join26.q.out =================================================================== --- ql/src/test/results/clientpositive/join26.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/join26.q.out (working copy) @@ -156,7 +156,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketmapjoin2.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketmapjoin2.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/bucketmapjoin2.q.out (working copy) @@ -198,7 +198,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part numFiles 4 - numPartitions 1 numRows 0 partition_columns ds rawDataSize 0 @@ -709,7 +708,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_2 numFiles 2 - numPartitions 1 numRows 0 partition_columns ds rawDataSize 0 @@ -1414,7 +1412,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part numFiles 4 - numPartitions 1 numRows 0 partition_columns ds rawDataSize 0 Index: ql/src/test/results/clientpositive/join_map_ppr.q.out =================================================================== --- ql/src/test/results/clientpositive/join_map_ppr.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/join_map_ppr.q.out (working copy) @@ -162,7 +162,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -729,7 +728,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/smb_mapjoin_11.q.out =================================================================== --- ql/src/test/results/clientpositive/smb_mapjoin_11.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/smb_mapjoin_11.q.out (working copy) @@ -136,7 +136,6 @@ partition values: ds 1 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 16 bucket_field_name key columns key,value @@ -144,7 +143,6 @@ #### A masked pattern was here #### name default.test_table1 numFiles 16 - numPartitions 1 numRows 500 partition_columns ds rawDataSize 5312 Index: ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out =================================================================== --- ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out (working copy) @@ -79,7 +79,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -126,7 +125,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -272,7 +270,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 
@@ -319,7 +316,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -366,7 +362,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -413,7 +408,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/sample1.q.out =================================================================== --- ql/src/test/results/clientpositive/sample1.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/sample1.q.out (working copy) @@ -106,7 +106,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/partition_wise_fileformat8.q.out =================================================================== --- ql/src/test/results/clientpositive/partition_wise_fileformat8.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/partition_wise_fileformat8.q.out (working copy) @@ -105,3 +105,43 @@ 100 val_100 3 103 val_103 1 103 val_103 1 +PREHOOK: query: select key+key as key, value, dt from partition_test_partitioned where dt is not null order by key, value, dt limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned@dt=2 +PREHOOK: Input: default@partition_test_partitioned@dt=3 +#### A masked pattern was here #### +POSTHOOK: query: select key+key as key, value, dt from partition_test_partitioned where dt is not null order by key, value, dt limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned@dt=2 +POSTHOOK: Input: default@partition_test_partitioned@dt=3 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0.0 val_0 1 +0.0 val_0 1 +0.0 val_0 1 +0.0 val_0 2 +0.0 val_0 2 +0.0 val_0 2 +0.0 val_0 3 +0.0 val_0 3 +0.0 val_0 3 +4.0 val_2 1 +4.0 val_2 2 +4.0 val_2 3 +8.0 val_4 1 +8.0 val_4 2 +8.0 val_4 3 +10.0 val_5 1 +10.0 val_5 1 +10.0 val_5 1 +10.0 val_5 2 +10.0 val_5 2 Index: ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out =================================================================== --- ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out (working copy) @@ -215,7 +215,6 @@ #### A 
masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 2 numRows 500 partition_columns part rawDataSize 5312 @@ -263,7 +262,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 2 numRows 500 partition_columns part rawDataSize 5312 Index: ql/src/test/results/clientpositive/union22.q.out =================================================================== --- ql/src/test/results/clientpositive/union22.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/union22.q.out (working copy) @@ -194,7 +194,6 @@ #### A masked pattern was here #### name default.dst_union22 numFiles 1 - numPartitions 1 numRows 500 partition_columns ds rawDataSize 11124 @@ -439,7 +438,6 @@ #### A masked pattern was here #### name default.dst_union22_delta numFiles 1 - numPartitions 1 numRows 500 partition_columns ds rawDataSize 16936 Index: ql/src/test/results/clientpositive/input_part1.q.out =================================================================== --- ql/src/test/results/clientpositive/input_part1.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/input_part1.q.out (working copy) @@ -102,7 +102,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/partition_wise_fileformat11.q.out =================================================================== --- ql/src/test/results/clientpositive/partition_wise_fileformat11.q.out (revision 0) +++ ql/src/test/results/clientpositive/partition_wise_fileformat11.q.out (working copy) @@ -0,0 +1,123 @@ +PREHOOK: query: -- This tests that the schema can be changed for binary serde data +create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: -- This tests that the schema can be changed for binary serde data +create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@partition_test_partitioned +PREHOOK: query: alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' +PREHOOK: type: ALTERTABLE_SERIALIZER +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Output: default@partition_test_partitioned +POSTHOOK: query: alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' +POSTHOOK: type: ALTERTABLE_SERIALIZER +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Output: default@partition_test_partitioned +PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') select * from src where key = 238 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@partition_test_partitioned@dt=1 +POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') select * from src where key = 238 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@partition_test_partitioned@dt=1 +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from partition_test_partitioned where dt is not 
null +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +238 val_238 1 +238 val_238 1 +PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +476.0 val_238 +476.0 val_238 +PREHOOK: query: alter table partition_test_partitioned change key key int +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Output: default@partition_test_partitioned +POSTHOOK: query: alter table partition_test_partitioned change key key int +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Output: default@partition_test_partitioned +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +476 val_238 +476 val_238 +PREHOOK: query: select * from partition_test_partitioned where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] 
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +238 val_238 1 +238 val_238 1 +PREHOOK: query: alter table partition_test_partitioned add columns (value2 string) +PREHOOK: type: ALTERTABLE_ADDCOLS +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Output: default@partition_test_partitioned +POSTHOOK: query: alter table partition_test_partitioned add columns (value2 string) +POSTHOOK: type: ALTERTABLE_ADDCOLS +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Output: default@partition_test_partitioned +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +476 val_238 +476 val_238 +PREHOOK: query: select * from partition_test_partitioned where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +238 val_238 NULL 1 +238 val_238 NULL 1 Index: ql/src/test/results/clientpositive/transform_ppr2.q.out =================================================================== --- ql/src/test/results/clientpositive/transform_ppr2.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/transform_ppr2.q.out (working copy) @@ -92,7 +92,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -139,7 +138,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/union_ppr.q.out =================================================================== --- ql/src/test/results/clientpositive/union_ppr.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/union_ppr.q.out (working copy) @@ -152,7 +152,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -199,7 +198,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 
1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketcontext_6.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketcontext_6.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/bucketcontext_6.q.out (working copy) @@ -149,7 +149,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -157,7 +156,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -199,7 +197,6 @@ partition values: ds 2008-04-09 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -207,7 +204,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -393,7 +389,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -401,7 +396,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -443,7 +437,6 @@ partition values: ds 2008-04-09 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -451,7 +444,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketcontext_1.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketcontext_1.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/bucketcontext_1.q.out (working copy) @@ -162,7 +162,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 4 bucket_field_name key columns key,value @@ -170,7 +169,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 4 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -212,7 +210,6 @@ partition values: ds 2008-04-09 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 4 bucket_field_name key columns key,value @@ -220,7 +217,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 4 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -408,7 +404,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 4 bucket_field_name key columns key,value @@ -416,7 +411,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 4 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -458,7 +452,6 @@ partition values: ds 2008-04-09 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 4 bucket_field_name key columns key,value @@ -466,7 +459,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 4 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketmapjoin10.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketmapjoin10.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/bucketmapjoin10.q.out (working copy) @@ -199,7 +199,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 2 numRows 0 partition_columns part rawDataSize 0 @@ -247,7 +246,6 @@ #### A masked pattern 
was here #### name default.srcbucket_mapjoin_part_1 numFiles 3 - numPartitions 2 numRows 0 partition_columns part rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketmapjoin1.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketmapjoin1.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/bucketmapjoin1.q.out (working copy) @@ -982,7 +982,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part numFiles 4 - numPartitions 1 numRows 0 partition_columns ds rawDataSize 0 @@ -1226,7 +1225,6 @@ hdfs directory: true #### A masked pattern was here #### - PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result select /*+mapjoin(a)*/ a.key, a.value, b.value from srcbucket_mapjoin a join srcbucket_mapjoin_part b Index: ql/src/test/results/clientpositive/sample10.q.out =================================================================== --- ql/src/test/results/clientpositive/sample10.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/sample10.q.out (working copy) @@ -115,7 +115,6 @@ #### A masked pattern was here #### name default.srcpartbucket numFiles 4 - numPartitions 4 numRows 10 partition_columns ds/hr rawDataSize 60 @@ -164,7 +163,6 @@ #### A masked pattern was here #### name default.srcpartbucket numFiles 4 - numPartitions 4 numRows 10 partition_columns ds/hr rawDataSize 60 @@ -213,7 +211,6 @@ #### A masked pattern was here #### name default.srcpartbucket numFiles 4 - numPartitions 4 numRows 10 partition_columns ds/hr rawDataSize 60 @@ -262,7 +259,6 @@ #### A masked pattern was here #### name default.srcpartbucket numFiles 4 - numPartitions 4 numRows 10 partition_columns ds/hr rawDataSize 60 Index: ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out =================================================================== --- ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out (working copy) @@ -151,7 +151,6 @@ partition values: part 1 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -159,7 +158,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 1 numRows 500 partition_columns part rawDataSize 5312 Index: ql/src/test/results/clientpositive/partition_wise_fileformat10.q.out =================================================================== --- ql/src/test/results/clientpositive/partition_wise_fileformat10.q.out (revision 0) +++ ql/src/test/results/clientpositive/partition_wise_fileformat10.q.out (working copy) @@ -0,0 +1,79 @@ +PREHOOK: query: -- This tests that the schema can be changed for binary serde data +create table prt(key string, value string) partitioned by (dt string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: -- This tests that the schema can be changed for binary serde data +create table prt(key string, value string) partitioned by (dt string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@prt +PREHOOK: query: insert overwrite table prt partition(dt='1') select * from src where key = 238 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@prt@dt=1 +POSTHOOK: query: insert overwrite table prt partition(dt='1') select * from src where key = 238 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@prt@dt=1 +POSTHOOK: Lineage: prt PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, 
type:string, comment:default), ] +POSTHOOK: Lineage: prt PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from prt where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@prt@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from prt where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@prt@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: prt PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: prt PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +238 val_238 1 +238 val_238 1 +PREHOOK: query: select key+key, value from prt where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@prt +PREHOOK: Input: default@prt@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select key+key, value from prt where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@prt +POSTHOOK: Input: default@prt@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: prt PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: prt PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +476.0 val_238 +476.0 val_238 +PREHOOK: query: alter table prt add columns (value2 string) +PREHOOK: type: ALTERTABLE_ADDCOLS +PREHOOK: Input: default@prt +PREHOOK: Output: default@prt +POSTHOOK: query: alter table prt add columns (value2 string) +POSTHOOK: type: ALTERTABLE_ADDCOLS +POSTHOOK: Input: default@prt +POSTHOOK: Output: default@prt +POSTHOOK: Lineage: prt PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: prt PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select key+key, value from prt where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@prt +PREHOOK: Input: default@prt@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select key+key, value from prt where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@prt +POSTHOOK: Input: default@prt@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: prt PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: prt PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +476.0 val_238 +476.0 val_238 +PREHOOK: query: select * from prt where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@prt@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from prt where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@prt@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: prt PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: prt PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +238 val_238 NULL 1 +238 val_238 NULL 1 Index: ql/src/test/results/clientpositive/transform_ppr1.q.out =================================================================== --- ql/src/test/results/clientpositive/transform_ppr1.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/transform_ppr1.q.out (working copy) @@ -90,7 +90,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 
partition_columns ds/hr rawDataSize 0 @@ -137,7 +136,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -184,7 +182,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -231,7 +228,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/ppd_union_view.q.out =================================================================== --- ql/src/test/results/clientpositive/ppd_union_view.q.out (revision 1437268) +++ ql/src/test/results/clientpositive/ppd_union_view.q.out (working copy) @@ -260,7 +260,6 @@ #### A masked pattern was here #### name default.t1_mapping numFiles 1 - numPartitions 2 numRows 1 partition_columns ds rawDataSize 12 @@ -306,7 +305,6 @@ #### A masked pattern was here #### name default.t1_old numFiles 1 - numPartitions 2 numRows 1 partition_columns ds rawDataSize 14 @@ -803,7 +801,6 @@ #### A masked pattern was here #### name default.t1_new numFiles 1 - numPartitions 2 numRows 1 partition_columns ds rawDataSize 11 Index: ql/src/test/results/compiler/plan/input2.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input2.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/input2.q.xml (working copy) @@ -1601,10 +1601,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -1621,18 +1617,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -1645,10 +1629,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -1656,10 +1636,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/join3.q.xml =================================================================== --- ql/src/test/results/compiler/plan/join3.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/join3.q.xml (working copy) @@ -182,10 +182,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -202,18 +198,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -226,10 +210,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -237,10 +217,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -354,10 +330,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -374,18 +346,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -398,10 +358,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -409,10 +365,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -526,10 +478,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -546,18 +494,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -570,10 +506,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -581,10 +513,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input4.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input4.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/input4.q.xml (working copy) @@ -182,10 +182,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -202,18 +198,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -226,10 +210,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -237,10 +217,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/join5.q.xml =================================================================== --- ql/src/test/results/compiler/plan/join5.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/join5.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -201,10 +177,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -221,18 +193,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -245,10 +205,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -256,10 +212,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input6.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input6.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/input6.q.xml (working copy) @@ -554,10 +554,6 @@ default.src1 - numFiles - 1 - - columns.types string:string @@ -574,18 +570,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -598,10 +582,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 216 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -609,10 +589,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input_testxpath2.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input_testxpath2.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/input_testxpath2.q.xml (working copy) @@ -29,10 +29,6 @@ default.src_thrift - numFiles - 1 - - columns.types @@ -49,18 +45,6 @@ org.apache.thrift.protocol.TBinaryProtocol - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - serialization.class org.apache.hadoop.hive.serde2.thrift.test.Complex @@ -77,10 +61,6 @@ org.apache.hadoop.mapred.SequenceFileInputFormat - totalSize - 1606 - - 
file.outputformat org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -88,10 +68,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/join7.q.xml =================================================================== --- ql/src/test/results/compiler/plan/join7.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/join7.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -201,10 +177,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -221,18 +193,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -245,10 +205,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -256,10 +212,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -373,10 +325,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -393,18 +341,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -417,10 +353,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -428,10 +360,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input8.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input8.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/input8.q.xml (working copy) @@ -29,10 +29,6 @@ default.src1 - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 216 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/union.q.xml =================================================================== --- ql/src/test/results/compiler/plan/union.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/union.q.xml (working copy) @@ -427,10 +427,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -447,18 +443,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -471,10 +455,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -482,10 +462,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -599,10 +575,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -619,18 +591,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 
- - bucket_count -1 @@ -643,10 +603,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -654,10 +610,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/udf4.q.xml =================================================================== --- ql/src/test/results/compiler/plan/udf4.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/udf4.q.xml (working copy) @@ -64,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/udf6.q.xml =================================================================== --- ql/src/test/results/compiler/plan/udf6.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/udf6.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input_part1.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input_part1.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/input_part1.q.xml (working copy) @@ -39,7 +39,7 @@ numFiles - 4 + 1 columns.types @@ -66,10 +66,6 @@ 0 - numPartitions - 4 - - partition_columns ds/hr @@ -87,7 +83,7 @@ totalSize - 23248 + 5812 file.outputformat @@ -865,10 +861,6 @@ 0 - numPartitions - 4 - - partition_columns ds/hr Index: ql/src/test/results/compiler/plan/groupby2.q.xml =================================================================== --- ql/src/test/results/compiler/plan/groupby2.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/groupby2.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/groupby4.q.xml =================================================================== --- ql/src/test/results/compiler/plan/groupby4.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/groupby4.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/groupby6.q.xml =================================================================== 
--- ql/src/test/results/compiler/plan/groupby6.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/groupby6.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/case_sensitivity.q.xml =================================================================== --- ql/src/test/results/compiler/plan/case_sensitivity.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/case_sensitivity.q.xml (working copy) @@ -554,10 +554,6 @@ default.src_thrift - numFiles - 1 - - columns.types @@ -574,18 +570,6 @@ org.apache.thrift.protocol.TBinaryProtocol - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - serialization.class org.apache.hadoop.hive.serde2.thrift.test.Complex @@ -602,10 +586,6 @@ org.apache.hadoop.mapred.SequenceFileInputFormat - totalSize - 1606 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -613,10 +593,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/sample1.q.xml =================================================================== --- ql/src/test/results/compiler/plan/sample1.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/sample1.q.xml (working copy) @@ -39,7 +39,7 @@ numFiles - 4 + 1 columns.types @@ -66,10 +66,6 @@ 0 - numPartitions - 4 - - partition_columns ds/hr @@ -87,7 +83,7 @@ totalSize - 23248 + 5812 file.outputformat @@ -986,10 +982,6 @@ 0 - numPartitions - 4 - - partition_columns ds/hr Index: ql/src/test/results/compiler/plan/sample3.q.xml =================================================================== --- ql/src/test/results/compiler/plan/sample3.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/sample3.q.xml (working copy) @@ -554,10 +554,6 @@ default.srcbucket - numFiles - 2 - - columns.types int:string @@ -578,18 +574,6 @@ 1 - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count 2 @@ -602,10 +586,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 11603 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -613,10 +593,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/sample5.q.xml =================================================================== --- ql/src/test/results/compiler/plan/sample5.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/sample5.q.xml (working copy) @@ -554,10 +554,6 @@ default.srcbucket - numFiles - 2 - - columns.types int:string @@ -578,18 +574,6 @@ 1 - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count 2 @@ -602,10 +586,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 11603 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -613,10 +593,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/sample7.q.xml 
=================================================================== --- ql/src/test/results/compiler/plan/sample7.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/sample7.q.xml (working copy) @@ -554,10 +554,6 @@ default.srcbucket - numFiles - 2 - - columns.types int:string @@ -578,18 +574,6 @@ 1 - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count 2 @@ -602,10 +586,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 11603 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -613,10 +593,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/cast1.q.xml =================================================================== --- ql/src/test/results/compiler/plan/cast1.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/cast1.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input1.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input1.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/input1.q.xml (working copy) @@ -554,10 +554,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -574,18 +570,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -598,10 +582,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -609,10 +589,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/join2.q.xml =================================================================== --- ql/src/test/results/compiler/plan/join2.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/join2.q.xml (working copy) @@ -193,10 +193,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -213,18 +209,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -237,10 +221,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -248,10 +228,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -1282,7 +1258,7 @@ 200 - + 1 @@ -1773,10 +1749,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -1793,18 +1765,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -1817,10 +1777,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -1828,10 +1784,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -1945,10 +1897,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -1965,18 +1913,6 @@ key,value - rawDataSize 
- 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -1989,10 +1925,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -2000,10 +1932,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input3.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input3.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/input3.q.xml (working copy) @@ -1978,10 +1978,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -1998,18 +1994,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -2022,10 +2006,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -2033,10 +2013,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/join4.q.xml =================================================================== --- ql/src/test/results/compiler/plan/join4.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/join4.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -201,10 +177,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -221,18 +193,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -245,10 +205,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -256,10 +212,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input5.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input5.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/input5.q.xml (working copy) @@ -182,10 +182,6 @@ default.src_thrift - numFiles - 1 - - columns.types @@ -202,18 +198,6 @@ org.apache.thrift.protocol.TBinaryProtocol - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - serialization.class org.apache.hadoop.hive.serde2.thrift.test.Complex @@ -230,10 +214,6 @@ org.apache.hadoop.mapred.SequenceFileInputFormat - totalSize - 1606 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -241,10 +221,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/join6.q.xml =================================================================== --- ql/src/test/results/compiler/plan/join6.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/join6.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - 
numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -201,10 +177,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -221,18 +193,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -245,10 +205,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -256,10 +212,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input7.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input7.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/input7.q.xml (working copy) @@ -554,10 +554,6 @@ default.src1 - numFiles - 1 - - columns.types string:string @@ -574,18 +570,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -598,10 +582,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 216 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -609,10 +589,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/join8.q.xml =================================================================== --- ql/src/test/results/compiler/plan/join8.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/join8.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -201,10 +177,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -221,18 +193,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -245,10 +205,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -256,10 +212,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input_testsequencefile.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input_testsequencefile.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/input_testsequencefile.q.xml (working copy) @@ -554,10 +554,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -574,18 +570,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -598,10 +582,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -609,10 +589,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A 
masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input9.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input9.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/input9.q.xml (working copy) @@ -554,10 +554,6 @@ default.src1 - numFiles - 1 - - columns.types string:string @@ -574,18 +570,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -598,10 +582,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 216 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -609,10 +589,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/udf1.q.xml =================================================================== --- ql/src/test/results/compiler/plan/udf1.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/udf1.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input_testxpath.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input_testxpath.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/input_testxpath.q.xml (working copy) @@ -29,10 +29,6 @@ default.src_thrift - numFiles - 1 - - columns.types @@ -49,18 +45,6 @@ org.apache.thrift.protocol.TBinaryProtocol - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - serialization.class org.apache.hadoop.hive.serde2.thrift.test.Complex @@ -77,10 +61,6 @@ org.apache.hadoop.mapred.SequenceFileInputFormat - totalSize - 1606 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -88,10 +68,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/groupby1.q.xml =================================================================== --- ql/src/test/results/compiler/plan/groupby1.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/groupby1.q.xml (working copy) @@ -182,10 +182,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -202,18 +198,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -226,10 +210,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -237,10 +217,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/udf_case.q.xml =================================================================== --- ql/src/test/results/compiler/plan/udf_case.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/udf_case.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ 
-73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/subq.q.xml =================================================================== --- ql/src/test/results/compiler/plan/subq.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/subq.q.xml (working copy) @@ -427,10 +427,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -447,18 +443,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -471,10 +455,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -482,10 +462,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/groupby3.q.xml =================================================================== --- ql/src/test/results/compiler/plan/groupby3.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/groupby3.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/groupby5.q.xml =================================================================== --- ql/src/test/results/compiler/plan/groupby5.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/groupby5.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/udf_when.q.xml =================================================================== --- ql/src/test/results/compiler/plan/udf_when.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/udf_when.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input20.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input20.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/input20.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 
- - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/sample2.q.xml =================================================================== --- ql/src/test/results/compiler/plan/sample2.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/sample2.q.xml (working copy) @@ -554,10 +554,6 @@ default.srcbucket - numFiles - 2 - - columns.types int:string @@ -578,18 +574,6 @@ 1 - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count 2 @@ -602,10 +586,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 11603 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -613,10 +593,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/sample4.q.xml =================================================================== --- ql/src/test/results/compiler/plan/sample4.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/sample4.q.xml (working copy) @@ -554,10 +554,6 @@ default.srcbucket - numFiles - 2 - - columns.types int:string @@ -578,18 +574,6 @@ 1 - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count 2 @@ -602,10 +586,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 11603 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -613,10 +593,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/sample6.q.xml =================================================================== --- ql/src/test/results/compiler/plan/sample6.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/sample6.q.xml (working copy) @@ -554,10 +554,6 @@ default.srcbucket - numFiles - 2 - - columns.types int:string @@ -578,18 +574,6 @@ 1 - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count 2 @@ -602,10 +586,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 11603 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -613,10 +593,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/join1.q.xml =================================================================== --- ql/src/test/results/compiler/plan/join1.q.xml (revision 1437268) +++ ql/src/test/results/compiler/plan/join1.q.xml (working copy) @@ -182,10 +182,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -202,18 +198,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -226,10 +210,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -237,10 +217,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -354,10 +330,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -374,18 +346,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - 
numPartitions - 0 - - bucket_count -1 @@ -398,10 +358,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -409,10 +365,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/org/apache/hadoop/hive/ql/metadata/TestPartition.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/metadata/TestPartition.java (revision 1437268) +++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestPartition.java (working copy) @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.ql.metadata; - -import java.net.URI; -import java.net.URISyntaxException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import junit.framework.TestCase; - -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; - -/** - * Test the partition class. - */ -public class TestPartition extends TestCase { - - private static final String PARTITION_COL = "partcol"; - private static final String PARTITION_VALUE = "value"; - private static final String TABLENAME = "tablename"; - - /** - * Test that the Partition spec is created properly. 
- */ - public void testPartition() throws HiveException, URISyntaxException { - StorageDescriptor sd = new StorageDescriptor(); - sd.setLocation("partlocation"); - - Partition tp = new Partition(); - tp.setTableName(TABLENAME); - tp.setSd(sd); - - List<String> values = new ArrayList<String>(); - values.add(PARTITION_VALUE); - tp.setValues(values); - - List<FieldSchema> partCols = new ArrayList<FieldSchema>(); - partCols.add(new FieldSchema(PARTITION_COL, "string", "")); - - Table tbl = new Table("default", TABLENAME); - tbl.setDataLocation(new URI("tmplocation")); - tbl.setPartCols(partCols); - - Map<String, String> spec = new org.apache.hadoop.hive.ql.metadata.Partition(tbl, tp).getSpec(); - assertFalse(spec.isEmpty()); - assertEquals(spec.get(PARTITION_COL), PARTITION_VALUE); - } - -} Index: ql/src/test/queries/clientpositive/partition_wise_fileformat8.q =================================================================== --- ql/src/test/queries/clientpositive/partition_wise_fileformat8.q (revision 1437268) +++ ql/src/test/queries/clientpositive/partition_wise_fileformat8.q (working copy) @@ -10,3 +10,4 @@ insert overwrite table partition_test_partitioned partition(dt='3') select * from src; select * from partition_test_partitioned where dt is not null order by key, value, dt limit 20; +select key+key as key, value, dt from partition_test_partitioned where dt is not null order by key, value, dt limit 20; Index: ql/src/test/queries/clientpositive/partition_wise_fileformat12.q =================================================================== --- ql/src/test/queries/clientpositive/partition_wise_fileformat12.q (revision 0) +++ ql/src/test/queries/clientpositive/partition_wise_fileformat12.q (working copy) @@ -0,0 +1,26 @@ +set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat; + +-- This tests that the schema can be changed for binary serde data +create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile; +alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'; +insert overwrite table partition_test_partitioned partition(dt='1') select * from src where key = 238; + +select * from partition_test_partitioned where dt is not null; +select key+key, value from partition_test_partitioned where dt is not null; + +alter table partition_test_partitioned change key key int; + +select key+key, value from partition_test_partitioned where dt is not null; +select * from partition_test_partitioned where dt is not null; + +insert overwrite table partition_test_partitioned partition(dt='2') select * from src where key = 97; + +alter table partition_test_partitioned add columns (value2 string); + +select key+key, value from partition_test_partitioned where dt is not null; +select * from partition_test_partitioned where dt is not null; + +insert overwrite table partition_test_partitioned partition(dt='3') select key, value, value from src where key = 200; + +select key+key, value, value2 from partition_test_partitioned where dt is not null; +select * from partition_test_partitioned where dt is not null; Index: ql/src/test/queries/clientpositive/partition_wise_fileformat9.q =================================================================== --- ql/src/test/queries/clientpositive/partition_wise_fileformat9.q (revision 1437268) +++ ql/src/test/queries/clientpositive/partition_wise_fileformat9.q (working copy) @@ -8,3 +8,5 @@ insert overwrite table partition_test_partitioned partition(dt='2') select * from src; select * from
partition_test_partitioned where dt is not null order by key, value, dt limit 20; +select key+key as key, value, dt from partition_test_partitioned where dt is not null order by key, value, dt limit 20; + Index: ql/src/test/queries/clientpositive/partition_wise_fileformat13.q =================================================================== --- ql/src/test/queries/clientpositive/partition_wise_fileformat13.q (revision 0) +++ ql/src/test/queries/clientpositive/partition_wise_fileformat13.q (working copy) @@ -0,0 +1,17 @@ +set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat; + +-- This tests that the schema of a partitioned table with binary serde data can be changed and the table can still be used in joins +create table T1(key string, value string) partitioned by (dt string) stored as rcfile; +alter table T1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'; +insert overwrite table T1 partition (dt='1') select * from src where key = 238 or key = 97; + +alter table T1 change key key int; +insert overwrite table T1 partition (dt='2') select * from src where key = 238 or key = 97; + +alter table T1 change key key string; + +create table T2(key string, value string) partitioned by (dt string) stored as rcfile; +insert overwrite table T2 partition (dt='1') select * from src where key = 238 or key = 97; + +select /*+ MAPJOIN(a) */ count(*) FROM T1 a JOIN T2 b ON a.key = b.key; +select count(*) FROM T1 a JOIN T2 b ON a.key = b.key; \ No newline at end of file Index: ql/src/test/queries/clientpositive/partition_wise_fileformat10.q =================================================================== --- ql/src/test/queries/clientpositive/partition_wise_fileformat10.q (revision 0) +++ ql/src/test/queries/clientpositive/partition_wise_fileformat10.q (working copy) @@ -0,0 +1,13 @@ +set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat; + +-- This tests that the schema can be changed for binary serde data +create table prt(key string, value string) partitioned by (dt string); +insert overwrite table prt partition(dt='1') select * from src where key = 238; + +select * from prt where dt is not null; +select key+key, value from prt where dt is not null; + +alter table prt add columns (value2 string); + +select key+key, value from prt where dt is not null; +select * from prt where dt is not null; Index: ql/src/test/queries/clientpositive/partition_wise_fileformat14.q =================================================================== --- ql/src/test/queries/clientpositive/partition_wise_fileformat14.q (revision 0) +++ ql/src/test/queries/clientpositive/partition_wise_fileformat14.q (working copy) @@ -0,0 +1,57 @@ +set hive.enforce.bucketing = true; +set hive.enforce.sorting = true; +set hive.exec.reducers.max = 1; + +CREATE TABLE tbl1(key int, value string) PARTITIONED by (ds string) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS rcfile; +CREATE TABLE tbl2(key int, value string) PARTITIONED by (ds string) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS rcfile; + +alter table tbl1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'; +alter table tbl2 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'; + +insert overwrite table tbl1 partition (ds='1') select * from src where key < 10; +insert overwrite table tbl2 partition (ds='1') select * from src where key < 10; + +alter table tbl1 change key key int; +insert overwrite table tbl1 partition (ds='2') select * from src where key < 10; + +alter
table tbl1 change key key string; + +-- The subquery itself is being map-joined. Multiple partitions of tbl1 with different schemas are being read for tbl2 +select /*+mapjoin(subq1)*/ count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key; + +set hive.optimize.bucketmapjoin = true; +set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; + +-- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should +-- be converted to a bucketized map-side join. Multiple partitions of tbl1 with different schemas are being read for each +-- bucket of tbl2 +select /*+mapjoin(subq1)*/ count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key; + +set hive.optimize.bucketmapjoin.sortedmerge = true; + +-- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should +-- be converted to a sort-merge join. Multiple partitions of tbl1 with different schemas are being read for a +-- given file of tbl2 +select /*+mapjoin(subq1)*/ count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key; + +-- Since the join key is modified by the sub-query, neither sort-merge join nor bucketized map-side +-- join should be performed. Multiple partitions of tbl1 with different schemas are being read for tbl2 +select /*+mapjoin(subq1)*/ count(*) from + (select a.key+1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 + join + (select a.key+1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 + on subq1.key = subq2.key; Index: ql/src/test/queries/clientpositive/partition_wise_fileformat11.q =================================================================== --- ql/src/test/queries/clientpositive/partition_wise_fileformat11.q (revision 0) +++ ql/src/test/queries/clientpositive/partition_wise_fileformat11.q (working copy) @@ -0,0 +1,19 @@ +set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat; + +-- This tests that the schema can be changed for binary serde data +create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile; +alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'; +insert overwrite table partition_test_partitioned partition(dt='1') select * from src where key = 238; + +select * from partition_test_partitioned where dt is not null; +select key+key, value from partition_test_partitioned where dt is not null; + +alter table partition_test_partitioned change key key int; + +select key+key, value from partition_test_partitioned where dt is not null; +select * from partition_test_partitioned where dt is not null; + +alter table partition_test_partitioned add columns (value2 string); + +select key+key, value from partition_test_partitioned where dt is not null; +select * from partition_test_partitioned where dt is not null; Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java (revision 1437268) +++
ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java (working copy) @@ -825,7 +825,12 @@ partDir.add(p); try { - partDesc.add(Utilities.getPartitionDescFromTableDesc(tblDesc, part)); + if (part.getTable().isPartitioned()) { + partDesc.add(Utilities.getPartitionDesc(part)); + } + else { + partDesc.add(Utilities.getPartitionDescFromTableDesc(tblDesc, part)); + } } catch (HiveException e) { LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); throw new SemanticException(e.getMessage(), e); Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (revision 1437268) +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (working copy) @@ -214,7 +214,7 @@ getInputFormatClass(); // This will set up field: outputFormatClass getOutputFormatClass(); - + getDeserializer(); } public String getName() { @@ -276,6 +276,10 @@ return MetaStoreUtils.getSchema(tPartition, table.getTTable()); } + public Properties getMetadataFromPartitionSchema() { + return MetaStoreUtils.getPartitionMetadata(tPartition, table.getTTable()); + } + public Properties getSchemaFromTableSchema(Properties tblSchema) { return MetaStoreUtils.getPartSchemaFromTableSchema(tPartition.getSd(), table.getTTable().getSd(), tPartition.getParameters(), table.getDbName(), table.getTableName(), table.getPartitionKeys(), Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java (revision 1437268) +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java (working copy) @@ -228,8 +228,8 @@ tTable.getSd().setOutputFormat(outputFormatClass.getName()); } - final public Properties getSchema() { - return MetaStoreUtils.getSchema(tTable); + final public Properties getMetadata() { + return MetaStoreUtils.getTableMetadata(tTable); } final public Path getPath() { Index: ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java (revision 1437268) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java (working copy) @@ -38,6 +38,7 @@ import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.PartitionDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.TableScanDesc; import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.serde2.Deserializer; @@ -45,6 +46,8 @@ import org.apache.hadoop.hive.serde2.SerDeStats; import org.apache.hadoop.hive.serde2.SerDeUtils; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; @@ -78,7 +81,9 @@ private transient Writable[] vcValues; private transient List vcs; private transient Object[] rowWithPartAndVC; - private transient StructObjectInspector rowObjectInspector; + private transient 
StructObjectInspector tblRowObjectInspector; + // convert from partition to table schema + private transient Converter partTblObjectInspectorConverter; private transient boolean isPartitioned; private transient boolean hasVC; private Map opCtxMap; @@ -112,9 +117,6 @@ public boolean equals(Object o) { if (o instanceof MapInputPath) { MapInputPath mObj = (MapInputPath) o; - if (mObj == null) { - return false; - } return path.equals(mObj.path) && alias.equals(mObj.alias) && op.equals(mObj.op); } @@ -141,15 +143,16 @@ } private static class MapOpCtx { - boolean isPartitioned; - StructObjectInspector rawRowObjectInspector; // without partition - StructObjectInspector partObjectInspector; // partition - StructObjectInspector rowObjectInspector; - Object[] rowWithPart; - Object[] rowWithPartAndVC; - Deserializer deserializer; - public String tableName; - public String partName; + private final boolean isPartitioned; + private final StructObjectInspector tblRawRowObjectInspector; // without partition + private final StructObjectInspector partObjectInspector; // partition + private StructObjectInspector rowObjectInspector; + private final Converter partTblObjectInspectorConverter; + private final Object[] rowWithPart; + private Object[] rowWithPartAndVC; + private final Deserializer deserializer; + private String tableName; + private String partName; /** * @param isPartitioned @@ -158,18 +161,20 @@ */ public MapOpCtx(boolean isPartitioned, StructObjectInspector rowObjectInspector, - StructObjectInspector rawRowObjectInspector, + StructObjectInspector tblRawRowObjectInspector, StructObjectInspector partObjectInspector, Object[] rowWithPart, Object[] rowWithPartAndVC, - Deserializer deserializer) { + Deserializer deserializer, + Converter partTblObjectInspectorConverter) { this.isPartitioned = isPartitioned; this.rowObjectInspector = rowObjectInspector; - this.rawRowObjectInspector = rawRowObjectInspector; + this.tblRawRowObjectInspector = tblRawRowObjectInspector; this.partObjectInspector = partObjectInspector; this.rowWithPart = rowWithPart; this.rowWithPartAndVC = rowWithPartAndVC; this.deserializer = deserializer; + this.partTblObjectInspectorConverter = partTblObjectInspectorConverter; } /** @@ -186,6 +191,10 @@ return rowObjectInspector; } + public StructObjectInspector getTblRawRowObjectInspector() { + return tblRawRowObjectInspector; + } + /** * @return the rowWithPart */ @@ -206,6 +215,10 @@ public Deserializer getDeserializer() { return deserializer; } + + public Converter getPartTblObjectInspectorConverter() { + return partTblObjectInspectorConverter; + } } /** @@ -225,38 +238,46 @@ } private MapOpCtx initObjectInspector(MapredWork conf, - Configuration hconf, String onefile) throws HiveException, + Configuration hconf, String onefile, Map convertedOI) + throws HiveException, ClassNotFoundException, InstantiationException, IllegalAccessException, SerDeException { - PartitionDesc td = conf.getPathToPartitionInfo().get(onefile); - LinkedHashMap partSpec = td.getPartSpec(); - Properties tblProps = td.getProperties(); + PartitionDesc pd = conf.getPathToPartitionInfo().get(onefile); + LinkedHashMap partSpec = pd.getPartSpec(); + // Use tblProps in case of unpartitioned tables + Properties partProps = + (pd.getPartSpec() == null || pd.getPartSpec().isEmpty()) ? 
+ pd.getTableDesc().getProperties() : pd.getProperties(); - Class sdclass = td.getDeserializerClass(); - if (sdclass == null) { - String className = td.getSerdeClassName(); - if ((className == "") || (className == null)) { + Class serdeclass = pd.getDeserializerClass(); + if (serdeclass == null) { + String className = pd.getSerdeClassName(); + if ((className == null) || (className.isEmpty())) { throw new HiveException( "SerDe class or the SerDe class name is not set for table: " - + td.getProperties().getProperty("name")); + + pd.getProperties().getProperty("name")); } - sdclass = hconf.getClassByName(className); + serdeclass = hconf.getClassByName(className); } - String tableName = String.valueOf(tblProps.getProperty("name")); + String tableName = String.valueOf(partProps.getProperty("name")); String partName = String.valueOf(partSpec); - // HiveConf.setVar(hconf, HiveConf.ConfVars.HIVETABLENAME, tableName); - // HiveConf.setVar(hconf, HiveConf.ConfVars.HIVEPARTITIONNAME, partName); - Deserializer deserializer = (Deserializer) sdclass.newInstance(); - deserializer.initialize(hconf, tblProps); - StructObjectInspector rawRowObjectInspector = (StructObjectInspector) deserializer + Deserializer partDeserializer = (Deserializer) serdeclass.newInstance(); + partDeserializer.initialize(hconf, partProps); + StructObjectInspector partRawRowObjectInspector = (StructObjectInspector) partDeserializer .getObjectInspector(); + StructObjectInspector tblRawRowObjectInspector = convertedOI.get(pd.getTableDesc()); + + partTblObjectInspectorConverter = + ObjectInspectorConverters.getConverter(partRawRowObjectInspector, + tblRawRowObjectInspector); + MapOpCtx opCtx = null; // Next check if this table has partitions and if so // get the list of partition names as well as allocate // the serdes for the partition columns - String pcols = tblProps + String pcols = partProps .getProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS); // Log LOG = LogFactory.getLog(MapOperator.class.getName()); if (pcols != null && pcols.length() > 0) { @@ -285,16 +306,16 @@ rowWithPart[1] = partValues; StructObjectInspector rowObjectInspector = ObjectInspectorFactory .getUnionStructObjectInspector(Arrays - .asList(new StructObjectInspector[] {rawRowObjectInspector, partObjectInspector})); + .asList(new StructObjectInspector[] {tblRawRowObjectInspector, partObjectInspector})); // LOG.info("dump " + tableName + " " + partName + " " + // rowObjectInspector.getTypeName()); - opCtx = new MapOpCtx(true, rowObjectInspector, rawRowObjectInspector, partObjectInspector, - rowWithPart, null, deserializer); + opCtx = new MapOpCtx(true, rowObjectInspector, tblRawRowObjectInspector, partObjectInspector, + rowWithPart, null, partDeserializer, partTblObjectInspectorConverter); } else { // LOG.info("dump2 " + tableName + " " + partName + " " + // rowObjectInspector.getTypeName()); - opCtx = new MapOpCtx(false, rawRowObjectInspector, rawRowObjectInspector, null, null, - null, deserializer); + opCtx = new MapOpCtx(false, tblRawRowObjectInspector, tblRawRowObjectInspector, null, null, + null, partDeserializer, partTblObjectInspectorConverter); } opCtx.tableName = tableName; opCtx.partName = partName; @@ -312,15 +333,20 @@ isPartitioned = opCtxMap.get(inp).isPartitioned(); rowWithPart = opCtxMap.get(inp).getRowWithPart(); rowWithPartAndVC = opCtxMap.get(inp).getRowWithPartAndVC(); - rowObjectInspector = opCtxMap.get(inp).getRowObjectInspector(); + tblRowObjectInspector = 
opCtxMap.get(inp).getRowObjectInspector(); + partTblObjectInspectorConverter = opCtxMap.get(inp).getPartTblObjectInspectorConverter(); if (listInputPaths.contains(inp)) { return; } listInputPaths.add(inp); + // The op may not be a TableScan for mapjoins + // Consider the query: select /*+MAPJOIN(a)*/ count(*) FROM T1 a JOIN T2 b ON a.key = b.key; + // In that case, it will be a Select, but the rowOI need not be amended if (op instanceof TableScanOperator) { - StructObjectInspector rawRowObjectInspector = opCtxMap.get(inp).rawRowObjectInspector; + StructObjectInspector tblRawRowObjectInspector = + opCtxMap.get(inp).getTblRawRowObjectInspector(); StructObjectInspector partObjectInspector = opCtxMap.get(inp).partObjectInspector; TableScanOperator tsOp = (TableScanOperator) op; TableScanDesc tsDesc = tsOp.getConf(); @@ -348,22 +374,100 @@ this.rowWithPartAndVC = new Object[2]; } if (partObjectInspector == null) { - this.rowObjectInspector = ObjectInspectorFactory.getUnionStructObjectInspector(Arrays + this.tblRowObjectInspector = ObjectInspectorFactory.getUnionStructObjectInspector(Arrays .asList(new StructObjectInspector[] { - rowObjectInspector, vcStructObjectInspector})); + tblRowObjectInspector, vcStructObjectInspector})); } else { - this.rowObjectInspector = ObjectInspectorFactory.getUnionStructObjectInspector(Arrays + this.tblRowObjectInspector = ObjectInspectorFactory.getUnionStructObjectInspector(Arrays .asList(new StructObjectInspector[] { - rawRowObjectInspector, partObjectInspector, + tblRawRowObjectInspector, partObjectInspector, vcStructObjectInspector})); } - opCtxMap.get(inp).rowObjectInspector = this.rowObjectInspector; + opCtxMap.get(inp).rowObjectInspector = this.tblRowObjectInspector; opCtxMap.get(inp).rowWithPartAndVC = this.rowWithPartAndVC; } } } } + // Return the mapping for table descriptor to the expected table OI + /** + * Traverse all the partitions for a table, and get the OI for the table. + * Note that a conversion is required if any of the partition OIs is different + * from the table OI. For eg. if the query references table T (partitions P1, P2), + * and P1's schema is the same as T's, whereas P2's schema is different from T's, conversion + * might be needed for both P1 and P2, since a SettableOI might be needed for T + */ + private Map getConvertedOI(Configuration hconf) + throws HiveException { + Map tableDescOI = + new HashMap(); + Set identityConverterTableDesc = new HashSet(); + try + { + for (String onefile : conf.getPathToAliases().keySet()) { + PartitionDesc pd = conf.getPathToPartitionInfo().get(onefile); + TableDesc tableDesc = pd.getTableDesc(); + Properties tblProps = tableDesc.getProperties(); + // If the partition does not exist, use table properties + Properties partProps = + (pd.getPartSpec() == null || pd.getPartSpec().isEmpty()) ?
+ tblProps : pd.getProperties(); + + Class sdclass = pd.getDeserializerClass(); + if (sdclass == null) { + String className = pd.getSerdeClassName(); + if ((className == null) || (className.isEmpty())) { + throw new HiveException( + "SerDe class or the SerDe class name is not set for table: " + + pd.getProperties().getProperty("name")); + } + sdclass = hconf.getClassByName(className); + } + + Deserializer partDeserializer = (Deserializer) sdclass.newInstance(); + partDeserializer.initialize(hconf, partProps); + StructObjectInspector partRawRowObjectInspector = (StructObjectInspector) partDeserializer + .getObjectInspector(); + + StructObjectInspector tblRawRowObjectInspector = tableDescOI.get(tableDesc); + if ((tblRawRowObjectInspector == null) || + (identityConverterTableDesc.contains(tableDesc))) { + sdclass = tableDesc.getDeserializerClass(); + if (sdclass == null) { + String className = tableDesc.getSerdeClassName(); + if ((className == null) || (className.isEmpty())) { + throw new HiveException( + "SerDe class or the SerDe class name is not set for table: " + + tableDesc.getProperties().getProperty("name")); + } + sdclass = hconf.getClassByName(className); + } + Deserializer tblDeserializer = (Deserializer) sdclass.newInstance(); + tblDeserializer.initialize(hconf, tblProps); + tblRawRowObjectInspector = + (StructObjectInspector) ObjectInspectorConverters.getConvertedOI( + partRawRowObjectInspector, + (StructObjectInspector) tblDeserializer.getObjectInspector()); + + if (identityConverterTableDesc.contains(tableDesc)) { + if (!partRawRowObjectInspector.equals(tblRawRowObjectInspector)) { + identityConverterTableDesc.remove(tableDesc); + } + } + else if (partRawRowObjectInspector.equals(tblRawRowObjectInspector)) { + identityConverterTableDesc.add(tableDesc); + } + + tableDescOI.put(tableDesc, tblRawRowObjectInspector); + } + } + } catch (Exception e) { + throw new HiveException(e); + } + return tableDescOI; + } + public void setChildren(Configuration hconf) throws HiveException { Path fpath = new Path((new Path(HiveConf.getVar(hconf, @@ -375,10 +479,10 @@ operatorToPaths = new HashMap, ArrayList>(); statsMap.put(Counter.DESERIALIZE_ERRORS, deserialize_error_count); - + Map convertedOI = getConvertedOI(hconf); try { for (String onefile : conf.getPathToAliases().keySet()) { - MapOpCtx opCtx = initObjectInspector(conf, hconf, onefile); + MapOpCtx opCtx = initObjectInspector(conf, hconf, onefile, convertedOI); Path onepath = new Path(new Path(onefile).toUri().getPath()); List aliases = conf.getPathToAliases().get(onefile); @@ -514,16 +618,18 @@ Object row = null; try { if (this.hasVC) { - this.rowWithPartAndVC[0] = deserializer.deserialize(value); + this.rowWithPartAndVC[0] = + partTblObjectInspectorConverter.convert(deserializer.deserialize(value)); int vcPos = isPartitioned ? 2 : 1; if (context != null) { populateVirtualColumnValues(context, vcs, vcValues, deserializer); } this.rowWithPartAndVC[vcPos] = this.vcValues; } else if (!isPartitioned) { - row = deserializer.deserialize((Writable) value); + row = partTblObjectInspectorConverter.convert(deserializer.deserialize((Writable) value)); } else { - rowWithPart[0] = deserializer.deserialize((Writable) value); + rowWithPart[0] = + partTblObjectInspectorConverter.convert(deserializer.deserialize((Writable) value)); } } catch (Exception e) { // Serialize the row and output. 
@@ -540,24 +646,26 @@ throw new HiveException("Hive Runtime Error while processing writable " + rawRowString, e); } + // The row has been converted to comply with table schema, irrespective of partition schema. + // So, use tblOI (and not partOI) for forwarding try { if (this.hasVC) { - forward(this.rowWithPartAndVC, this.rowObjectInspector); + forward(this.rowWithPartAndVC, this.tblRowObjectInspector); } else if (!isPartitioned) { - forward(row, rowObjectInspector); + forward(row, tblRowObjectInspector); } else { - forward(rowWithPart, rowObjectInspector); + forward(rowWithPart, tblRowObjectInspector); } } catch (Exception e) { // Serialize the row and output the error message. String rowString; try { if (this.hasVC) { - rowString = SerDeUtils.getJSONString(rowWithPartAndVC, rowObjectInspector); + rowString = SerDeUtils.getJSONString(rowWithPartAndVC, tblRowObjectInspector); } else if (!isPartitioned) { - rowString = SerDeUtils.getJSONString(row, rowObjectInspector); + rowString = SerDeUtils.getJSONString(row, tblRowObjectInspector); } else { - rowString = SerDeUtils.getJSONString(rowWithPart, rowObjectInspector); + rowString = SerDeUtils.getJSONString(rowWithPart, tblRowObjectInspector); } } catch (Exception e2) { rowString = "[Error getting row data with exception " + Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapper.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapper.java (revision 1437268) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapper.java (working copy) @@ -109,8 +109,6 @@ dummyOp.setExecContext(execContext); dummyOp.initialize(jc,null); } - - } catch (Throwable e) { abort = true; if (e instanceof OutOfMemoryError) { Index: ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (revision 1437268) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (working copy) @@ -50,6 +50,8 @@ import org.apache.hadoop.hive.serde2.objectinspector.DelegatedObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; @@ -92,6 +94,9 @@ private transient Writable value; private transient Writable[] vcValues; private transient Deserializer serde; + private transient Deserializer tblSerde; + private transient Converter partTblObjectInspectorConverter; + private transient Iterator iterPath; private transient Iterator iterPartDesc; private transient Path currPath; @@ -220,34 +225,35 @@ return inputFormats.get(inputFormatClass); } - private StructObjectInspector setTableDesc(TableDesc table) throws Exception { + private StructObjectInspector getRowInspectorFromTable(TableDesc table) throws Exception { Deserializer serde = table.getDeserializerClass().newInstance(); serde.initialize(job, table.getProperties()); - return createRowInspector(getCurrent(serde)); + return createRowInspector(getStructOIFrom(serde.getObjectInspector())); } - 
private StructObjectInspector setPrtnDesc(PartitionDesc partition) throws Exception { - Deserializer serde = partition.getDeserializerClass().newInstance(); - serde.initialize(job, partition.getProperties()); + private StructObjectInspector getRowInspectorFromPartition(PartitionDesc partition, + ObjectInspector partitionOI) throws Exception { + String pcols = partition.getTableDesc().getProperties().getProperty( org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS); String[] partKeys = pcols.trim().split("/"); row[1] = createPartValue(partKeys, partition.getPartSpec()); - return createRowInspector(getCurrent(serde), partKeys); + + return createRowInspector(getStructOIFrom(partitionOI), partKeys); } - private StructObjectInspector setPrtnDesc(TableDesc table) throws Exception { + private StructObjectInspector getRowInspectorFromPartitionedTable(TableDesc table) + throws Exception { Deserializer serde = table.getDeserializerClass().newInstance(); serde.initialize(job, table.getProperties()); String pcols = table.getProperties().getProperty( org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS); String[] partKeys = pcols.trim().split("/"); row[1] = null; - return createRowInspector(getCurrent(serde), partKeys); + return createRowInspector(getStructOIFrom(serde.getObjectInspector()), partKeys); } - private StructObjectInspector getCurrent(Deserializer serde) throws SerDeException { - ObjectInspector current = serde.getObjectInspector(); + private StructObjectInspector getStructOIFrom(ObjectInspector current) throws SerDeException { if (objectInspector != null) { current = DelegatedObjectInspectorFactory.reset(objectInspector, current); } else { @@ -360,16 +366,16 @@ job.set("mapred.input.dir", org.apache.hadoop.util.StringUtils.escapeString(currPath .toString())); - PartitionDesc tmp; + PartitionDesc partDesc; if (currTbl == null) { - tmp = currPart; + partDesc = currPart; } else { - tmp = new PartitionDesc(currTbl, null); + partDesc = new PartitionDesc(currTbl, null); } - Class formatter = tmp.getInputFileFormatClass(); + Class formatter = partDesc.getInputFileFormatClass(); inputFormat = getInputFormatFromCache(formatter, job); - Utilities.copyTableJobPropertiesToConf(tmp.getTableDesc(), job); + Utilities.copyTableJobPropertiesToConf(partDesc.getTableDesc(), job); InputSplit[] splits = inputFormat.getSplits(job, 1); FetchInputFormatSplit[] inputSplits = new FetchInputFormatSplit[splits.length]; for (int i = 0; i < splits.length; i++) { @@ -381,17 +387,32 @@ this.inputSplits = inputSplits; splitNum = 0; - serde = tmp.getDeserializerClass().newInstance(); - serde.initialize(job, tmp.getProperties()); + serde = partDesc.getDeserializerClass().newInstance(); + serde.initialize(job, partDesc.getProperties()); + if (currTbl != null) { + tblSerde = serde; + } + else { + tblSerde = currPart.getTableDesc().getDeserializerClass().newInstance(); + tblSerde.initialize(job, currPart.getTableDesc().getProperties()); + } + + ObjectInspector outputOI = ObjectInspectorConverters.getConvertedOI( + serde.getObjectInspector(), + tblSerde.getObjectInspector()); + + partTblObjectInspectorConverter = ObjectInspectorConverters.getConverter( + serde.getObjectInspector(), outputOI); + if (LOG.isDebugEnabled()) { LOG.debug("Creating fetchTask with deserializer typeinfo: " + serde.getObjectInspector().getTypeName()); - LOG.debug("deserializer properties: " + tmp.getProperties()); + LOG.debug("deserializer properties: " + 
partDesc.getProperties()); } if (currPart != null) { - setPrtnDesc(currPart); + getRowInspectorFromPartition(currPart, outputOI); } } @@ -503,14 +524,15 @@ vcValues = MapOperator.populateVirtualColumnValues(context, vcCols, vcValues, serde); row[isPartitioned ? 2 : 1] = vcValues; } - row[0] = serde.deserialize(value); + row[0] = partTblObjectInspectorConverter.convert(serde.deserialize(value)); + if (hasVC || isPartitioned) { inspectable.o = row; inspectable.oi = rowObjectInspector; return inspectable; } inspectable.o = row[0]; - inspectable.oi = serde.getObjectInspector(); + inspectable.oi = tblSerde.getObjectInspector(); return inspectable; } else { currRecReader.close(); @@ -569,13 +591,33 @@ public ObjectInspector getOutputObjectInspector() throws HiveException { try { if (work.isNotPartitioned()) { - return setTableDesc(work.getTblDesc()); + return getRowInspectorFromTable(work.getTblDesc()); } List listParts = work.getPartDesc(); + // Choose the table descriptor if none of the partitions is present. + // For eg: consider the query: + // select /*+mapjoin(T1)*/ count(*) from T1 join T2 on T1.key=T2.key + // Both T1 and T2 are partitioned tables, but T1 does not have any partitions. + // FetchOperator is invoked for T1, and listParts is empty. In that case, + // use T1's schema to get the ObjectInspector. if (listParts == null || listParts.isEmpty()) { - return setPrtnDesc(work.getTblDesc()); + return getRowInspectorFromPartitionedTable(work.getTblDesc()); } - return setPrtnDesc(listParts.get(0)); + + // Choose any partition. Its OI needs to be converted to the table OI. + // Whenever a new partition is read, a new converter is created. + PartitionDesc partition = listParts.get(0); + Deserializer tblSerde = partition.getTableDesc().getDeserializerClass().newInstance(); + tblSerde.initialize(job, partition.getTableDesc().getProperties()); + + Deserializer partSerde = partition.getDeserializerClass().newInstance(); + partSerde.initialize(job, partition.getProperties()); + + ObjectInspector partitionOI = ObjectInspectorConverters.getConvertedOI( + partSerde.getObjectInspector(), + tblSerde.getObjectInspector()); + + return getRowInspectorFromPartition(partition, partitionOI); } catch (Exception e) { throw new HiveException("Failed with exception " + e.getMessage() + org.apache.hadoop.util.StringUtils.stringifyException(e)); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (revision 1437268) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (working copy) @@ -120,8 +120,8 @@ import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.ql.plan.PartitionDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; +import org.apache.hadoop.hive.ql.plan.PlanUtils.ExpressionTypes; import org.apache.hadoop.hive.ql.plan.TableDesc; -import org.apache.hadoop.hive.ql.plan.PlanUtils.ExpressionTypes; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.stats.StatsFactory; import org.apache.hadoop.hive.ql.stats.StatsPublisher; @@ -137,8 +137,8 @@ import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.SequenceFile.CompressionType; import org.apache.hadoop.io.Writable; -import org.apache.hadoop.io.SequenceFile.CompressionType; import org.apache.hadoop.io.compress.CompressionCodec; import
org.apache.hadoop.io.compress.DefaultCodec; import org.apache.hadoop.mapred.FileOutputFormat; @@ -559,30 +559,6 @@ } } - /** - * Tuple. - * - * @param - * @param - */ - public static class Tuple { - private final T one; - private final V two; - - public Tuple(T one, V two) { - this.one = one; - this.two = two; - } - - public T getOne() { - return this.one; - } - - public V getTwo() { - return this.two; - } - } - public static TableDesc defaultTd; static { // by default we expect ^A separated strings @@ -693,7 +669,7 @@ public static TableDesc getTableDesc(Table tbl) { return (new TableDesc(tbl.getDeserializer().getClass(), tbl.getInputFormatClass(), tbl - .getOutputFormatClass(), tbl.getSchema())); + .getOutputFormatClass(), tbl.getMetadata())); } // column names and column types are all delimited by comma Index: ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java (revision 1437268) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java (working copy) @@ -30,6 +30,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.persistence.RowContainer; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -40,7 +41,6 @@ import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.SMBJoinDesc; import org.apache.hadoop.hive.ql.plan.api.OperatorType; -import org.apache.hadoop.hive.ql.util.ObjectPair; import org.apache.hadoop.hive.serde2.ColumnProjectionUtils; import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; Index: ql/src/java/org/apache/hadoop/hive/ql/util/ObjectPair.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/util/ObjectPair.java (revision 1437268) +++ ql/src/java/org/apache/hadoop/hive/ql/util/ObjectPair.java (working copy) @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.util; - -public class ObjectPair { - private F first; - private S second; - - public ObjectPair() {} - - public ObjectPair(F first, S second) { - this.first = first; - this.second = second; - } - - public F getFirst() { - return first; - } - - public void setFirst(F first) { - this.first = first; - } - - public S getSecond() { - return second; - } - - public void setSecond(S second) { - this.second = second; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java (revision 1437268) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java (working copy) @@ -87,7 +87,7 @@ public PartitionDesc(final org.apache.hadoop.hive.ql.metadata.Partition part) throws HiveException { tableDesc = Utilities.getTableDesc(part.getTable()); - properties = part.getSchema(); + properties = part.getMetadataFromPartitionSchema(); partSpec = part.getSpec(); deserializerClass = part.getDeserializer(properties).getClass(); inputFileFormatClass = part.getInputFormatClass(); Index: ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java (revision 1437268) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java (working copy) @@ -20,9 +20,9 @@ import java.io.Serializable; import java.util.Enumeration; -import java.util.Properties; import java.util.LinkedHashMap; import java.util.Map; +import java.util.Properties; import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; @@ -149,7 +149,7 @@ org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE) != null); } - + @Override public Object clone() { TableDesc ret = new TableDesc(); @@ -170,4 +170,42 @@ } return ret; } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((deserializerClass == null) ? 0 : deserializerClass.hashCode()); + result = prime * result + + ((inputFileFormatClass == null) ? 0 : inputFileFormatClass.hashCode()); + result = prime * result + + ((outputFileFormatClass == null) ? 0 : outputFileFormatClass.hashCode()); + result = prime * result + ((properties == null) ? 0 : properties.hashCode()); + result = prime * result + ((serdeClassName == null) ? 0 : serdeClassName.hashCode()); + result = prime * result + ((jobProperties == null) ? 0 : jobProperties.hashCode()); + return result; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof TableDesc)) { + return false; + } + + TableDesc target = (TableDesc) o; + boolean ret = true; + ret = ret && (deserializerClass == null ? target.deserializerClass == null : + deserializerClass.equals(target.deserializerClass)); + ret = ret && (inputFileFormatClass == null ? target.inputFileFormatClass == null : + inputFileFormatClass.equals(target.inputFileFormatClass)); + ret = ret && (outputFileFormatClass == null ? target.outputFileFormatClass == null : + outputFileFormatClass.equals(target.outputFileFormatClass)); + ret = ret && (properties == null ? target.properties == null : + properties.equals(target.properties)); + ret = ret && (serdeClassName == null ? target.serdeClassName == null : + serdeClassName.equals(target.serdeClassName)); + ret = ret && (jobProperties == null ? 
target.jobProperties == null : + jobProperties.equals(target.jobProperties)); + return ret; + } } Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (revision 1437268) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (working copy) @@ -41,6 +41,7 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.JavaUtils; +import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.TableType; @@ -165,7 +166,6 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFHash; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr; import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF; -import org.apache.hadoop.hive.ql.util.ObjectPair; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
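A note on the new TableDesc.equals()/hashCode() overrides: getConvertedOI() keys a HashMap by TableDesc, but every PartitionDesc builds its own TableDesc instance through Utilities.getTableDesc(), so partitions of one table hold equal-but-distinct descriptors. The standalone sketch below is illustrative rather than part of the patch; the class name is invented, the serde and file-format classes are arbitrary stand-ins, and it assumes the four-argument TableDesc constructor that Utilities.getTableDesc() calls.

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
import org.apache.hadoop.mapred.SequenceFileInputFormat;

// Illustrative sketch only, not part of the patch.
public class TableDescAsMapKeySketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("name", "default.t1");

    // Two descriptors built independently for two partitions of one table,
    // as PartitionDesc's constructor does via Utilities.getTableDesc().
    TableDesc d1 = new TableDesc(LazySimpleSerDe.class,
        SequenceFileInputFormat.class, HiveSequenceFileOutputFormat.class, props);
    TableDesc d2 = new TableDesc(LazySimpleSerDe.class,
        SequenceFileInputFormat.class, HiveSequenceFileOutputFormat.class, props);

    Map<TableDesc, String> tableDescOI = new HashMap<TableDesc, String>();
    tableDescOI.put(d1, "table object inspector");

    // With the inherited identity-based equals()/hashCode(), this lookup would
    // miss and getConvertedOI() would build one table OI per partition; with
    // the value-based overrides it hits, giving one OI per table.
    System.out.println(tableDescOI.get(d2));
  }
}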