Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java	(revision 1432949)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java	(working copy)
@@ -1286,7 +1286,7 @@
       return null;
     }
     return new Partition(mpart.getValues(), dbName, tblName, mpart.getCreateTime(),
-        mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd(), true),
+        mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd(), false),
         mpart.getParameters());
   }
 
@@ -1681,7 +1681,7 @@
       query.setOrdering("partitionName ascending");
       List<MPartition> mparts = (List<MPartition>) query.executeWithMap(params);
-      // pm.retrieveAll(mparts); // retrieveAll is pessimistic. some fields may not be needed
+      pm.retrieveAll(mparts); // retrieveAll is pessimistic. some fields may not be needed
       List<Partition> results = convertToParts(dbName, tblName, mparts);
      // pm.makeTransientAll(mparts); // makeTransient will prohibit future access of unfetched fields
       query.closeAll();
Index: metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java	(revision 1432949)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java	(working copy)
@@ -237,7 +237,7 @@
     String lib = part.getSd().getSerdeInfo().getSerializationLib();
     try {
       Deserializer deserializer = SerDeUtils.lookupDeserializer(lib);
-      deserializer.initialize(conf, MetaStoreUtils.getSchema(part, table));
+      deserializer.initialize(conf, MetaStoreUtils.getPartitionSchema(part, table));
       return deserializer;
     } catch (RuntimeException e) {
       throw e;
@@ -497,6 +497,15 @@
         .getParameters(), table.getDbName(), table.getTableName(),
         table.getPartitionKeys());
   }
+  public static Properties getPartitionSchema(
+      org.apache.hadoop.hive.metastore.api.Partition partition,
+      org.apache.hadoop.hive.metastore.api.Table table) {
+    return MetaStoreUtils
+        .getSchema(partition.getSd(), partition.getSd(), partition
+            .getParameters(), table.getDbName(), table.getTableName(),
+            table.getPartitionKeys());
+  }
+
   public static Properties getSchema(
       org.apache.hadoop.hive.metastore.api.Partition part,
       org.apache.hadoop.hive.metastore.api.Table table) {
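
[Review note] The two metastore changes above are the heart of the patch: a partition's deserializer is now initialized from the partition's own storage descriptor via the new getPartitionSchema() (which passes partition.getSd() for both storage-descriptor arguments of getSchema()), rather than from the table-level schema. A minimal sketch of the resulting call path, assuming a Partition part, a Table table, and a Configuration conf are already in hand (variable names are illustrative, not from the patch):

    // Initialize a partition's deserializer from partition-level metadata.
    String lib = part.getSd().getSerdeInfo().getSerializationLib();
    Deserializer deserializer = SerDeUtils.lookupDeserializer(lib);
    // Column names/types now come from the partition itself, so a partition
    // written before an ALTER TABLE keeps its original schema.
    deserializer.initialize(conf, MetaStoreUtils.getPartitionSchema(part, table));
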
Index: common/src/java/org/apache/hadoop/hive/common/ObjectPair.java
===================================================================
--- common/src/java/org/apache/hadoop/hive/common/ObjectPair.java	(revision 0)
+++ common/src/java/org/apache/hadoop/hive/common/ObjectPair.java	(working copy)
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common;
+
+public class ObjectPair<F, S> {
+  private F first;
+  private S second;
+
+  public ObjectPair() {}
+
+  public ObjectPair(F first, S second) {
+    this.first = first;
+    this.second = second;
+  }
+
+  public F getFirst() {
+    return first;
+  }
+
+  public void setFirst(F first) {
+    this.first = first;
+  }
+
+  public S getSecond() {
+    return second;
+  }
+
+  public void setSecond(S second) {
+    this.second = second;
+  }
+}
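
[Review note] ObjectPair is a plain mutable generic holder (no equals/hashCode). A usage sketch with illustrative values, not from the patch:

    // Pair a partition name with a row count.
    ObjectPair<String, Long> pair = new ObjectPair<String, Long>("ds=2008-04-08", 500L);
    String partName = pair.getFirst();
    pair.setSecond(501L);  // setters allow in-place updates, unlike an immutable pair
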
Index: serde/src/java/org/apache/hadoop/hive/serde2/NullStructSerDe.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/NullStructSerDe.java	(revision 1432949)
+++ serde/src/java/org/apache/hadoop/hive/serde2/NullStructSerDe.java	(working copy)
@@ -56,32 +56,11 @@
     return null;
   }
 
+  private static ObjectInspector nullStructOI = new NullStructSerDeObjectInspector();
+
   @Override
   public ObjectInspector getObjectInspector() throws SerDeException {
-    return new StructObjectInspector() {
-      public String getTypeName() {
-        return "null";
-      }
-      public Category getCategory() {
-        return Category.PRIMITIVE;
-      }
-      @Override
-      public StructField getStructFieldRef(String fieldName) {
-        return null;
-      }
-      @Override
-      public List<? extends StructField> getAllStructFieldRefs() {
-        return new ArrayList<StructField>();
-      }
-      @Override
-      public Object getStructFieldData(Object data, StructField fieldRef) {
-        return null;
-      }
-      @Override
-      public List<Object> getStructFieldsDataAsList(Object data) {
-        return new ArrayList<Object>();
-      }
-    };
+    return nullStructOI;
   }
 
@@ -103,4 +82,38 @@
     return NullWritable.get();
   }
 
+
+  /**
+   * An object inspector for the null struct SerDe.
+   */
+  public static class NullStructSerDeObjectInspector extends StructObjectInspector {
+    public String getTypeName() {
+      return "null";
+    }
+
+    public Category getCategory() {
+      return Category.PRIMITIVE;
+    }
+
+    @Override
+    public StructField getStructFieldRef(String fieldName) {
+      return null;
+    }
+
+    @Override
+    public List<? extends StructField> getAllStructFieldRefs() {
+      return new ArrayList<StructField>();
+    }
+
+    @Override
+    public Object getStructFieldData(Object data, StructField fieldRef) {
+      return null;
+    }
+
+    @Override
+    public List<Object> getStructFieldsDataAsList(Object data) {
+      return new ArrayList<Object>();
+    }
+  }
+
 }
Index: serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/SettableStructObjectInspector.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/SettableStructObjectInspector.java	(revision 1432949)
+++ serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/SettableStructObjectInspector.java	(working copy)
@@ -34,4 +34,9 @@
    */
   public abstract Object setStructFieldData(Object struct, StructField field,
       Object fieldValue);
+
+  @Override
+  public boolean isSettable() {
+    return true;
+  }
 }
Index: serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/StructObjectInspector.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/StructObjectInspector.java	(revision 1432949)
+++ serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/StructObjectInspector.java	(working copy)
@@ -47,6 +47,10 @@
    */
   public abstract List<Object> getStructFieldsDataAsList(Object data);
 
+  public boolean isSettable() {
+    return false;
+  }
+
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
Index: serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java	(revision 1432949)
+++ serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java	(working copy)
@@ -24,8 +24,8 @@
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaStringObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorConverter;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableBinaryObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableBooleanObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableBinaryObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableByteObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableDoubleObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableFloatObjectInspector;
@@ -33,8 +33,8 @@
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableLongObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableShortObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableTimestampObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.VoidObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableStringObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.VoidObjectInspector;
 
 /**
  * ObjectInspectorConverters.
@@ -59,6 +59,61 @@
     }
   }
 
+  private static Converter getConverter(PrimitiveObjectInspector inputOI,
+      PrimitiveObjectInspector outputOI) {
+    switch (outputOI.getPrimitiveCategory()) {
+    case BOOLEAN:
+      return new PrimitiveObjectInspectorConverter.BooleanConverter(
+          inputOI,
+          (SettableBooleanObjectInspector) outputOI);
+    case BYTE:
+      return new PrimitiveObjectInspectorConverter.ByteConverter(
+          inputOI,
+          (SettableByteObjectInspector) outputOI);
+    case SHORT:
+      return new PrimitiveObjectInspectorConverter.ShortConverter(
+          inputOI,
+          (SettableShortObjectInspector) outputOI);
+    case INT:
+      return new PrimitiveObjectInspectorConverter.IntConverter(
+          inputOI,
+          (SettableIntObjectInspector) outputOI);
+    case LONG:
+      return new PrimitiveObjectInspectorConverter.LongConverter(
+          inputOI,
+          (SettableLongObjectInspector) outputOI);
+    case FLOAT:
+      return new PrimitiveObjectInspectorConverter.FloatConverter(
+          inputOI,
+          (SettableFloatObjectInspector) outputOI);
+    case DOUBLE:
+      return new PrimitiveObjectInspectorConverter.DoubleConverter(
+          inputOI,
+          (SettableDoubleObjectInspector) outputOI);
+    case STRING:
+      if (outputOI instanceof WritableStringObjectInspector) {
+        return new PrimitiveObjectInspectorConverter.TextConverter(
+            inputOI);
+      } else if (outputOI instanceof JavaStringObjectInspector) {
+        return new PrimitiveObjectInspectorConverter.StringConverter(
+            inputOI);
+      }
+    case TIMESTAMP:
+      return new PrimitiveObjectInspectorConverter.TimestampConverter(
+          inputOI,
+          (SettableTimestampObjectInspector) outputOI);
+    case BINARY:
+      return new PrimitiveObjectInspectorConverter.BinaryConverter(
+          inputOI,
+          (SettableBinaryObjectInspector)outputOI);
+
+    default:
+      throw new RuntimeException("Hive internal error: conversion of "
+          + inputOI.getTypeName() + " to " + outputOI.getTypeName()
+          + " not supported yet.");
+    }
+  }
+
   /**
    * Returns a converter that converts objects from one OI to another OI. The
    * returned (converted) object belongs to this converter, so that it can be
@@ -73,57 +128,7 @@
     }
     switch (outputOI.getCategory()) {
     case PRIMITIVE:
-      switch (((PrimitiveObjectInspector) outputOI).getPrimitiveCategory()) {
-      case BOOLEAN:
-        return new PrimitiveObjectInspectorConverter.BooleanConverter(
-            (PrimitiveObjectInspector) inputOI,
-            (SettableBooleanObjectInspector) outputOI);
-      case BYTE:
-        return new PrimitiveObjectInspectorConverter.ByteConverter(
-            (PrimitiveObjectInspector) inputOI,
-            (SettableByteObjectInspector) outputOI);
-      case SHORT:
-        return new PrimitiveObjectInspectorConverter.ShortConverter(
-            (PrimitiveObjectInspector) inputOI,
-            (SettableShortObjectInspector) outputOI);
-      case INT:
-        return new PrimitiveObjectInspectorConverter.IntConverter(
-            (PrimitiveObjectInspector) inputOI,
-            (SettableIntObjectInspector) outputOI);
-      case LONG:
-        return new PrimitiveObjectInspectorConverter.LongConverter(
-            (PrimitiveObjectInspector) inputOI,
-            (SettableLongObjectInspector) outputOI);
-      case FLOAT:
-        return new PrimitiveObjectInspectorConverter.FloatConverter(
-            (PrimitiveObjectInspector) inputOI,
-            (SettableFloatObjectInspector) outputOI);
-      case DOUBLE:
-        return new PrimitiveObjectInspectorConverter.DoubleConverter(
-            (PrimitiveObjectInspector) inputOI,
-            (SettableDoubleObjectInspector) outputOI);
-      case STRING:
-        if (outputOI instanceof WritableStringObjectInspector) {
-          return new PrimitiveObjectInspectorConverter.TextConverter(
-              (PrimitiveObjectInspector) inputOI);
-        } else if (outputOI instanceof JavaStringObjectInspector) {
-          return new PrimitiveObjectInspectorConverter.StringConverter(
-              (PrimitiveObjectInspector) inputOI);
-        }
-      case TIMESTAMP:
-        return new PrimitiveObjectInspectorConverter.TimestampConverter(
-            (PrimitiveObjectInspector) inputOI,
-            (SettableTimestampObjectInspector) outputOI);
-      case BINARY:
-        return new PrimitiveObjectInspectorConverter.BinaryConverter(
-            (PrimitiveObjectInspector)inputOI,
-            (SettableBinaryObjectInspector)outputOI);
-
-      default:
-        throw new RuntimeException("Hive internal error: conversion of "
-            + inputOI.getTypeName() + " to " + outputOI.getTypeName()
-            + " not supported yet.");
-      }
+      return getConverter((PrimitiveObjectInspector) inputOI, (PrimitiveObjectInspector) outputOI);
     case STRUCT:
       return new StructConverter(inputOI,
           (SettableStructObjectInspector) outputOI);
@@ -140,6 +145,50 @@
     }
   }
 
+  public static ObjectInspector getConvertedOI(
+      ObjectInspector inputOI,
+      ObjectInspector outputOI) {
+    // If the inputOI is the same as the outputOI, just return it
+    if (inputOI == outputOI) {
+      return outputOI;
+    }
+    switch (outputOI.getCategory()) {
+    case PRIMITIVE:
+      return outputOI;
+    case STRUCT:
+      StructObjectInspector structOutputOI = (StructObjectInspector) outputOI;
+      if (structOutputOI.isSettable()) {
+        return outputOI;
+      }
+      else {
+        // create a standard settable struct object inspector
+        List<? extends StructField> listFields = structOutputOI.getAllStructFieldRefs();
+        List<String> structFieldNames = new ArrayList<String>(listFields.size());
+        List<ObjectInspector> structFieldObjectInspectors = new ArrayList<ObjectInspector>(
+            listFields.size());
+
+        for (StructField listField : listFields) {
+          structFieldNames.add(listField.getFieldName());
+          structFieldObjectInspectors.add(listField.getFieldObjectInspector());
+        }
+
+        StandardStructObjectInspector structStandardOutputOI = ObjectInspectorFactory
+            .getStandardStructObjectInspector(
+                structFieldNames,
+                structFieldObjectInspectors);
+        return structStandardOutputOI;
+      }
+    case LIST:
+      return outputOI;
+    case MAP:
+      return outputOI;
+    default:
+      throw new RuntimeException("Hive internal error: conversion of "
+          + inputOI.getTypeName() + " to " + outputOI.getTypeName()
+          + " not supported yet.");
+    }
+  }
+
   /**
    * A converter class for List.
    */
@@ -221,10 +270,11 @@
       this.outputOI = outputOI;
       inputFields = this.inputOI.getAllStructFieldRefs();
       outputFields = outputOI.getAllStructFieldRefs();
-      assert (inputFields.size() == outputFields.size());
-      fieldConverters = new ArrayList<Converter>(inputFields.size());
-      for (int f = 0; f < inputFields.size(); f++) {
+      // If the output has some extra fields, set them to NULL.
+      int minFields = Math.min(inputFields.size(), outputFields.size());
+      fieldConverters = new ArrayList<Converter>(minFields);
+      for (int f = 0; f < minFields; f++) {
         fieldConverters.add(getConverter(inputFields.get(f)
             .getFieldObjectInspector(), outputFields.get(f)
             .getFieldObjectInspector()));
@@ -243,15 +293,19 @@
         return null;
       }
 
+      int minFields = Math.min(inputFields.size(), outputFields.size());
       // Convert the fields
-      for (int f = 0; f < inputFields.size(); f++) {
-        Object inputFieldValue = inputOI.getStructFieldData(input, inputFields
-            .get(f));
-        Object outputFieldValue = fieldConverters.get(f).convert(
-            inputFieldValue);
-        outputOI.setStructFieldData(output, outputFields.get(f),
-            outputFieldValue);
+      for (int f = 0; f < minFields; f++) {
+        Object inputFieldValue = inputOI.getStructFieldData(input, inputFields.get(f));
+        Object outputFieldValue = fieldConverters.get(f).convert(inputFieldValue);
+        outputOI.setStructFieldData(output, outputFields.get(f), outputFieldValue);
       }
+
+      // set the extra fields to null
+      for (int f = minFields; f < outputFields.size(); f++) {
+        outputOI.setStructFieldData(output, outputFields.get(f), null);
+      }
+
       return output;
     }
   }
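
[Review note] The serde changes combine into the schema-evolution path: getConvertedOI() returns the output OI unchanged when it is already settable, and otherwise builds a standard settable struct OI from its fields; StructConverter now pads missing trailing fields with null instead of asserting equal field counts. A sketch of the intended call sequence, assuming a partition-side inputOI, a table-side outputOI, and a partitionRow object (illustrative names, not from the patch):

    // Derive a settable output OI, then convert rows; output fields that have
    // no input counterpart (e.g. columns added by ALTER TABLE) become NULL.
    ObjectInspector convertedOI =
        ObjectInspectorConverters.getConvertedOI(inputOI, outputOI);
    Converter converter =
        ObjectInspectorConverters.getConverter(inputOI, convertedOI);
    Object converted = converter.convert(partitionRow);
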
Index: ql/src/test/results/clientpositive/bucketmapjoin5.q.out
===================================================================
--- ql/src/test/results/clientpositive/bucketmapjoin5.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/bucketmapjoin5.q.out	(working copy)
@@ -253,7 +253,6 @@
 #### A masked pattern was here ####
 name default.srcbucket_mapjoin_part
 numFiles 4
-numPartitions 2
 numRows 0
 partition_columns ds
 rawDataSize 0
@@ -301,7 +300,6 @@
 #### A masked pattern was here ####
 name default.srcbucket_mapjoin_part
 numFiles 4
-numPartitions 2
 numRows 0
 partition_columns ds
 rawDataSize 0
@@ -813,7 +811,6 @@
 #### A masked pattern was here ####
 name default.srcbucket_mapjoin_part_2
 numFiles 2
-numPartitions 2
 numRows 0
 partition_columns ds
 rawDataSize 0
@@ -861,7 +858,6 @@
 #### A masked pattern was here ####
 name default.srcbucket_mapjoin_part_2
 numFiles 2
-numPartitions 2
 numRows 0
 partition_columns ds
 rawDataSize 0
Index: ql/src/test/results/clientpositive/pcr.q.out
===================================================================
--- ql/src/test/results/clientpositive/pcr.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/pcr.q.out	(working copy)
@@ -123,7 +123,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -169,7 +168,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -323,7 +321,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -369,7 +366,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -415,7 +411,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -613,7 +608,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -659,7 +653,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -821,7 +814,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -867,7 +859,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -1031,7 +1022,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -1077,7 +1067,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -1123,7 +1112,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -1298,7 +1286,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -1344,7 +1331,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -1390,7 +1376,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -1569,7 +1554,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -1615,7 +1599,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -1756,7 +1739,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -1802,7 +1784,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -1983,7 +1964,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -2029,7 +2009,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -2075,7 +2054,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -2290,7 +2268,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -2336,7 +2313,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -2495,7 +2471,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -2773,7 +2748,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -2819,7 +2793,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 3
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -3112,7 +3085,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 4
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -3158,7 +3130,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 4
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -3204,7 +3175,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 4
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -3250,7 +3220,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 4
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -3454,7 +3423,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 4
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -3500,7 +3468,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 4
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -3546,7 +3513,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 4
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -3822,7 +3788,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 4
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -4382,7 +4347,6 @@
 #### A masked pattern was here ####
 name default.pcr_t1
 numFiles 1
-numPartitions 4
 numRows 20
 partition_columns ds
 rawDataSize 160
@@ -4938,7 +4902,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -5111,7 +5074,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -5158,7 +5120,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -5337,7 +5298,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -5384,7 +5344,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
Index: ql/src/test/results/clientpositive/load_dyn_part8.q.out
===================================================================
--- ql/src/test/results/clientpositive/load_dyn_part8.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/load_dyn_part8.q.out	(working copy)
@@ -157,7 +157,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -204,7 +203,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -251,7 +249,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -298,7 +295,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
Index: ql/src/test/results/clientpositive/groupby_sort_6.q.out
===================================================================
--- ql/src/test/results/clientpositive/groupby_sort_6.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/groupby_sort_6.q.out	(working copy)
@@ -421,7 +421,6 @@
 #### A masked pattern was here ####
 name default.t1
 numFiles 1
-numPartitions 1
 numRows 0
 partition_columns ds
 rawDataSize 0
Index: ql/src/test/results/clientpositive/filter_join_breaktask.q.out
===================================================================
--- ql/src/test/results/clientpositive/filter_join_breaktask.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/filter_join_breaktask.q.out	(working copy)
@@ -101,7 +101,6 @@
 #### A masked pattern was here ####
 name default.filter_join_breaktask
 numFiles 1
-numPartitions 1
 numRows 25
 partition_columns ds
 rawDataSize 211
@@ -231,7 +230,6 @@
 #### A masked pattern was here ####
 name default.filter_join_breaktask
 numFiles 1
-numPartitions 1
 numRows 25
 partition_columns ds
 rawDataSize 211
Index: ql/src/test/results/clientpositive/sort_merge_join_desc_5.q.out
===================================================================
--- ql/src/test/results/clientpositive/sort_merge_join_desc_5.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/sort_merge_join_desc_5.q.out	(working copy)
@@ -120,7 +120,6 @@
 partition values:
 part 1
 properties:
-SORTBUCKETCOLSPREFIX TRUE
 bucket_count 1
 bucket_field_name key
 columns key,value
@@ -128,7 +127,6 @@
 #### A masked pattern was here ####
 name default.srcbucket_mapjoin_part_1
 numFiles 1
-numPartitions 1
 numRows 500
 partition_columns part
 rawDataSize 5312
Index: ql/src/test/results/clientpositive/input_part9.q.out
===================================================================
--- ql/src/test/results/clientpositive/input_part9.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/input_part9.q.out	(working copy)
@@ -71,7 +71,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -118,7 +117,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
Index: ql/src/test/results/clientpositive/join9.q.out
===================================================================
--- ql/src/test/results/clientpositive/join9.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/join9.q.out	(working copy)
@@ -120,7 +120,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
Index: ql/src/test/results/clientpositive/rand_partitionpruner3.q.out
===================================================================
--- ql/src/test/results/clientpositive/rand_partitionpruner3.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/rand_partitionpruner3.q.out	(working copy)
@@ -73,7 +73,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -201,7 +200,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
Index: ql/src/test/results/clientpositive/bucketcontext_4.q.out
===================================================================
--- ql/src/test/results/clientpositive/bucketcontext_4.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/bucketcontext_4.q.out	(working copy)
@@ -162,7 +162,6 @@
 partition values:
 ds 2008-04-08
 properties:
-SORTBUCKETCOLSPREFIX TRUE
 bucket_count 2
 bucket_field_name key
 columns key,value
@@ -170,7 +169,6 @@
 #### A masked pattern was here ####
 name default.bucket_big
 numFiles 2
-numPartitions 1
 numRows 0
 partition_columns ds
 rawDataSize 0
@@ -357,7 +355,6 @@
 partition values:
 ds 2008-04-08
 properties:
-SORTBUCKETCOLSPREFIX TRUE
 bucket_count 2
 bucket_field_name key
 columns key,value
@@ -365,7 +362,6 @@
 #### A masked pattern was here ####
 name default.bucket_big
 numFiles 2
-numPartitions 1
 numRows 0
 partition_columns ds
 rawDataSize 0
Index: ql/src/test/results/clientpositive/merge3.q.out
===================================================================
--- ql/src/test/results/clientpositive/merge3.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/merge3.q.out	(working copy)
@@ -2441,7 +2441,6 @@
 #### A masked pattern was here ####
 name default.merge_src_part
 numFiles 2
-numPartitions 2
 numRows 1000
 partition_columns ds
 rawDataSize 10624
@@ -2487,7 +2486,6 @@
 #### A masked pattern was here ####
 name default.merge_src_part
 numFiles 2
-numPartitions 2
 numRows 1000
 partition_columns ds
 rawDataSize 10624
@@ -4873,7 +4871,6 @@
 #### A masked pattern was here ####
 name default.merge_src_part
 numFiles 2
-numPartitions 2
 numRows 1000
 partition_columns ds
 rawDataSize 10624
@@ -4919,7 +4916,6 @@
 #### A masked pattern was here ####
 name default.merge_src_part
 numFiles 2
-numPartitions 2
 numRows 1000
 partition_columns ds
 rawDataSize 10624
Index: ql/src/test/results/clientpositive/bucketmapjoin9.q.out
===================================================================
--- ql/src/test/results/clientpositive/bucketmapjoin9.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/bucketmapjoin9.q.out	(working copy)
@@ -151,7 +151,6 @@
 #### A masked pattern was here ####
 name default.srcbucket_mapjoin_part_1
 numFiles 2
-numPartitions 1
 numRows 0
 partition_columns part
 rawDataSize 0
@@ -417,7 +416,6 @@
 #### A masked pattern was here ####
 name default.srcbucket_mapjoin_part_1
 numFiles 2
-numPartitions 1
 numRows 0
 partition_columns part
 rawDataSize 0
Index: ql/src/test/results/clientpositive/bucketmapjoin13.q.out
===================================================================
--- ql/src/test/results/clientpositive/bucketmapjoin13.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/bucketmapjoin13.q.out	(working copy)
@@ -179,7 +179,6 @@
 #### A masked pattern was here ####
 name default.srcbucket_mapjoin_part_1
 numFiles 2
-numPartitions 2
 numRows 500
 partition_columns part
 rawDataSize 5312
@@ -227,7 +226,6 @@
 #### A masked pattern was here ####
 name default.srcbucket_mapjoin_part_1
 numFiles 2
-numPartitions 2
 numRows 500
 partition_columns part
 rawDataSize 5312
@@ -478,7 +476,6 @@
 #### A masked pattern was here ####
 name default.srcbucket_mapjoin_part_1
 numFiles 2
-numPartitions 2
 numRows 500
 partition_columns part
 rawDataSize 5312
@@ -740,7 +737,6 @@
 #### A masked pattern was here ####
 name default.srcbucket_mapjoin_part_1
 numFiles 2
-numPartitions 2
 numRows 500
 partition_columns part
 rawDataSize 5312
@@ -1004,7 +1000,6 @@
 #### A masked pattern was here ####
 name default.srcbucket_mapjoin_part_1
 numFiles 2
-numPartitions 2
 numRows 500
 partition_columns part
 rawDataSize 5312
Index: ql/src/test/results/clientpositive/columnstats_partlvl.q.out
===================================================================
--- ql/src/test/results/clientpositive/columnstats_partlvl.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/columnstats_partlvl.q.out	(working copy)
@@ -145,7 +145,6 @@
 #### A masked pattern was here ####
 name default.employee_part
 numFiles 1
-numPartitions 2
 numRows 0
 partition_columns employeesalary
 rawDataSize 0
@@ -352,7 +351,6 @@
 #### A masked pattern was here ####
 name default.employee_part
 numFiles 1
-numPartitions 2
 numRows 0
 partition_columns employeesalary
 rawDataSize 0
Index: ql/src/test/results/clientpositive/sample8.q.out
===================================================================
--- ql/src/test/results/clientpositive/sample8.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/sample8.q.out	(working copy)
@@ -85,7 +85,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -132,7 +131,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -179,7 +177,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -226,7 +223,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
Index: ql/src/test/results/clientpositive/router_join_ppr.q.out
===================================================================
--- ql/src/test/results/clientpositive/router_join_ppr.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/router_join_ppr.q.out	(working copy)
@@ -136,7 +136,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -183,7 +182,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -230,7 +228,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -277,7 +274,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -546,7 +542,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -593,7 +588,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -853,7 +847,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -900,7 +893,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -1160,7 +1152,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -1207,7 +1198,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -1254,7 +1244,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -1301,7 +1290,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
Index: ql/src/test/results/clientpositive/input42.q.out
===================================================================
--- ql/src/test/results/clientpositive/input42.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/input42.q.out	(working copy)
@@ -66,7 +66,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -113,7 +112,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -1258,7 +1256,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -1305,7 +1302,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -1828,7 +1824,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -1875,7 +1870,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
Index: ql/src/test/results/clientpositive/louter_join_ppr.q.out
===================================================================
--- ql/src/test/results/clientpositive/louter_join_ppr.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/louter_join_ppr.q.out	(working copy)
@@ -134,7 +134,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -181,7 +180,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -441,7 +439,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -488,7 +485,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -535,7 +531,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -582,7 +577,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -853,7 +847,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -900,7 +893,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -947,7 +939,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -994,7 +985,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -1260,7 +1250,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -1307,7 +1296,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
Index: ql/src/test/results/clientpositive/bucketcontext_8.q.out
===================================================================
--- ql/src/test/results/clientpositive/bucketcontext_8.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/bucketcontext_8.q.out	(working copy)
@@ -175,7 +175,6 @@
 partition values:
 ds 2008-04-08
 properties:
-SORTBUCKETCOLSPREFIX TRUE
 bucket_count 4
 bucket_field_name key
 columns key,value
@@ -183,7 +182,6 @@
 #### A masked pattern was here ####
 name default.bucket_big
 numFiles 4
-numPartitions 2
 numRows 0
 partition_columns ds
 rawDataSize 0
@@ -225,7 +223,6 @@
 partition values:
 ds 2008-04-09
 properties:
-SORTBUCKETCOLSPREFIX TRUE
 bucket_count 4
 bucket_field_name key
 columns key,value
@@ -233,7 +230,6 @@
 #### A masked pattern was here ####
 name default.bucket_big
 numFiles 4
-numPartitions 2
 numRows 0
 partition_columns ds
 rawDataSize 0
@@ -423,7 +419,6 @@
 partition values:
 ds 2008-04-08
 properties:
-SORTBUCKETCOLSPREFIX TRUE
 bucket_count 4
 bucket_field_name key
 columns key,value
@@ -431,7 +426,6 @@
 #### A masked pattern was here ####
 name default.bucket_big
 numFiles 4
-numPartitions 2
 numRows 0
 partition_columns ds
 rawDataSize 0
@@ -473,7 +467,6 @@
 partition values:
 ds 2008-04-09
 properties:
-SORTBUCKETCOLSPREFIX TRUE
 bucket_count 4
 bucket_field_name key
 columns key,value
@@ -481,7 +474,6 @@
 #### A masked pattern was here ####
 name default.bucket_big
 numFiles 4
-numPartitions 2
 numRows 0
 partition_columns ds
 rawDataSize 0
Index: ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
===================================================================
--- ql/src/test/results/clientpositive/rand_partitionpruner2.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/rand_partitionpruner2.q.out	(working copy)
@@ -95,7 +95,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -142,7 +141,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
Index: ql/src/test/results/clientpositive/bucketcontext_3.q.out
===================================================================
--- ql/src/test/results/clientpositive/bucketcontext_3.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/bucketcontext_3.q.out	(working copy)
@@ -150,7 +150,6 @@
 partition values:
 ds 2008-04-08
 properties:
-SORTBUCKETCOLSPREFIX TRUE
 bucket_count 4
 bucket_field_name key
 columns key,value
@@ -158,7 +157,6 @@
 #### A masked pattern was here ####
 name default.bucket_big
 numFiles 4
-numPartitions 1
 numRows 0
 partition_columns ds
 rawDataSize 0
@@ -345,7 +343,6 @@
 partition values:
 ds 2008-04-08
 properties:
-SORTBUCKETCOLSPREFIX TRUE
 bucket_count 4
 bucket_field_name key
 columns key,value
@@ -353,7 +350,6 @@
 #### A masked pattern was here ####
 name default.bucket_big
 numFiles 4
-numPartitions 1
 numRows 0
 partition_columns ds
 rawDataSize 0
Index: ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out
===================================================================
--- ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out	(working copy)
@@ -72,7 +72,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -119,7 +118,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
Index: ql/src/test/results/clientpositive/bucketmapjoin8.q.out
===================================================================
--- ql/src/test/results/clientpositive/bucketmapjoin8.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/bucketmapjoin8.q.out	(working copy)
@@ -152,7 +152,6 @@
 #### A masked pattern was here ####
 name default.srcbucket_mapjoin_part_1
 numFiles 2
-numPartitions 1
 numRows 0
 partition_columns part
 rawDataSize 0
@@ -396,7 +395,6 @@
 #### A masked pattern was here ####
 name default.srcbucket_mapjoin_part_1
 numFiles 2
-numPartitions 1
 numRows 0
 partition_columns part
 rawDataSize 0
Index: ql/src/test/results/clientpositive/bucketmapjoin12.q.out
===================================================================
--- ql/src/test/results/clientpositive/bucketmapjoin12.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/bucketmapjoin12.q.out	(working copy)
@@ -180,7 +180,6 @@
 #### A masked pattern was here ####
 name default.srcbucket_mapjoin_part_1
 numFiles 2
-numPartitions 1
 numRows 0
 partition_columns part
 rawDataSize 0
@@ -409,7 +408,6 @@
 #### A masked pattern was here ####
 name default.srcbucket_mapjoin_part_1
 numFiles 2
-numPartitions 1
 numRows 0
 partition_columns part
 rawDataSize 0
Index: ql/src/test/results/clientpositive/bucketmapjoin3.q.out
===================================================================
--- ql/src/test/results/clientpositive/bucketmapjoin3.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/bucketmapjoin3.q.out	(working copy)
@@ -215,7 +215,6 @@
 #### A masked pattern was here ####
 name default.srcbucket_mapjoin_part_2
 numFiles 2
-numPartitions 1
 numRows 0
 partition_columns ds
 rawDataSize 0
@@ -726,7 +725,6 @@
 #### A masked pattern was here ####
 name default.srcbucket_mapjoin_part
 numFiles 4
-numPartitions 1
 numRows 0
 partition_columns ds
 rawDataSize 0
Index: ql/src/test/results/clientpositive/smb_mapjoin_12.q.out
===================================================================
--- ql/src/test/results/clientpositive/smb_mapjoin_12.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/smb_mapjoin_12.q.out	(working copy)
@@ -157,7 +157,6 @@
 partition values:
 ds 1
 properties:
-SORTBUCKETCOLSPREFIX TRUE
 bucket_count 16
 bucket_field_name key
 columns key,value
@@ -165,7 +164,6 @@
 #### A masked pattern was here ####
 name default.test_table1
 numFiles 16
-numPartitions 1
 numRows 500
 partition_columns ds
 rawDataSize 5312
@@ -398,7 +396,6 @@
 partition values:
 ds 1
 properties:
-SORTBUCKETCOLSPREFIX TRUE
 bucket_count 16
 bucket_field_name key
 columns key,value
@@ -406,7 +403,6 @@
 #### A masked pattern was here ####
 name default.test_table3
 numFiles 16
-numPartitions 1
 numRows 3084
 partition_columns ds
 rawDataSize 32904
Index: ql/src/test/results/clientpositive/outer_join_ppr.q.out
===================================================================
--- ql/src/test/results/clientpositive/outer_join_ppr.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/outer_join_ppr.q.out	(working copy)
@@ -126,7 +126,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -173,7 +172,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -220,7 +218,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -267,7 +264,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -528,7 +524,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -575,7 +570,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -622,7 +616,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -669,7 +662,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
Index: ql/src/test/results/clientpositive/partition_wise_fileformat9.q.out
===================================================================
--- ql/src/test/results/clientpositive/partition_wise_fileformat9.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/partition_wise_fileformat9.q.out	(working copy)
@@ -75,3 +75,39 @@
 104	val_104	1
 104	val_104	2
 104	val_104	2
+PREHOOK: query: select key+key as key, value, dt from partition_test_partitioned where dt is not null order by key, value, dt limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@partition_test_partitioned
+PREHOOK: Input: default@partition_test_partitioned@dt=1
+PREHOOK: Input: default@partition_test_partitioned@dt=2
+#### A masked pattern was here ####
+POSTHOOK: query: select key+key as key, value, dt from partition_test_partitioned where dt is not null order by key, value, dt limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@partition_test_partitioned
+POSTHOOK: Input: default@partition_test_partitioned@dt=1
+POSTHOOK: Input: default@partition_test_partitioned@dt=2
+#### A masked pattern was here ####
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+0.0	val_0	1
+0.0	val_0	1
+0.0	val_0	1
+0.0	val_0	2
+0.0	val_0	2
+0.0	val_0	2
+4.0	val_2	1
+4.0	val_2	2
+8.0	val_4	1
+8.0	val_4	2
+10.0	val_5	1
+10.0	val_5	1
+10.0	val_5	1
+10.0	val_5	2
+10.0	val_5	2
+10.0	val_5	2
+16.0	val_8	1
+16.0	val_8	2
+18.0	val_9	1
+18.0	val_9	2
Index: ql/src/test/results/clientpositive/stats11.q.out
===================================================================
--- ql/src/test/results/clientpositive/stats11.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/stats11.q.out	(working copy)
@@ -909,7 +909,6 @@
 #### A masked pattern was here ####
 name default.srcbucket_mapjoin_part
 numFiles 4
-numPartitions 1
 numRows 0
 partition_columns ds
 rawDataSize 0
@@ -1153,7 +1152,6 @@
           hdfs directory: true
 #### A masked pattern was here ####
-
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
 select /*+mapjoin(a)*/ a.key, a.value, b.value
 from srcbucket_mapjoin a join srcbucket_mapjoin_part b
Index: ql/src/test/results/clientpositive/input23.q.out
===================================================================
--- ql/src/test/results/clientpositive/input23.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/input23.q.out	(working copy)
@@ -71,7 +71,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
Index: ql/src/test/results/clientpositive/groupby_ppr.q.out
===================================================================
--- ql/src/test/results/clientpositive/groupby_ppr.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/groupby_ppr.q.out	(working copy)
@@ -70,7 +70,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -117,7 +116,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
Index: ql/src/test/results/clientpositive/input_part7.q.out
===================================================================
--- ql/src/test/results/clientpositive/input_part7.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/input_part7.q.out	(working copy)
@@ -150,7 +150,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -197,7 +196,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
Index: ql/src/test/results/clientpositive/join33.q.out
===================================================================
--- ql/src/test/results/clientpositive/join33.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/join33.q.out	(working copy)
@@ -210,7 +210,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
Index: ql/src/test/results/clientpositive/input_part2.q.out
===================================================================
--- ql/src/test/results/clientpositive/input_part2.q.out	(revision 1432949)
+++ ql/src/test/results/clientpositive/input_part2.q.out	(working copy)
@@ -167,7 +167,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
@@ -214,7 +213,6 @@
 #### A masked pattern was here ####
 name default.srcpart
 numFiles 1
-numPartitions 4
 numRows 0
 partition_columns ds/hr
 rawDataSize 0
Index: ql/src/test/results/clientpositive/partition_wise_fileformat12.q.out
===================================================================
--- ql/src/test/results/clientpositive/partition_wise_fileformat12.q.out	(revision 0)
+++ ql/src/test/results/clientpositive/partition_wise_fileformat12.q.out	(working copy)
@@ -0,0 +1,216 @@
+PREHOOK: query: -- This tests that the schema can be changed for binary serde data
+create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: -- This tests that the schema can be changed for binary serde data
+create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@partition_test_partitioned
+PREHOOK: query: alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
+PREHOOK: type: ALTERTABLE_SERIALIZER
+PREHOOK: Input: default@partition_test_partitioned
+PREHOOK: Output: default@partition_test_partitioned
+POSTHOOK: query: alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
+POSTHOOK: type: ALTERTABLE_SERIALIZER
+POSTHOOK: Input: default@partition_test_partitioned
+POSTHOOK: Output: default@partition_test_partitioned
+PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') select * from src where key = 238
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@partition_test_partitioned@dt=1
+POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') select * from src where key = 238
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@partition_test_partitioned@dt=1
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from partition_test_partitioned where dt is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@partition_test_partitioned@dt=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from partition_test_partitioned where dt is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@partition_test_partitioned@dt=1
+#### A masked pattern was here ####
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+238	val_238	1
+238	val_238	1
+PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@partition_test_partitioned
+PREHOOK: Input: default@partition_test_partitioned@dt=1
+#### A masked pattern was here ####
+POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@partition_test_partitioned
+POSTHOOK: Input: default@partition_test_partitioned@dt=1
+#### A masked pattern was here ####
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+476.0	val_238
+476.0	val_238
+PREHOOK: query: alter table partition_test_partitioned change key key int
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@partition_test_partitioned
+PREHOOK: Output: default@partition_test_partitioned
+POSTHOOK: query: alter table partition_test_partitioned change key key int
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@partition_test_partitioned
+POSTHOOK: Output: default@partition_test_partitioned
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@partition_test_partitioned
+PREHOOK: Input: default@partition_test_partitioned@dt=1
+#### A masked pattern was here ####
+POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@partition_test_partitioned
+POSTHOOK: Input: default@partition_test_partitioned@dt=1
+#### A masked pattern was here ####
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+476	val_238
+476	val_238
+PREHOOK: query: select * from partition_test_partitioned where dt is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@partition_test_partitioned@dt=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from partition_test_partitioned where dt is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@partition_test_partitioned@dt=1
+#### A masked pattern was here ####
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+238	val_238	1
+238	val_238	1
+PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='2') select * from src where key = 97
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@partition_test_partitioned@dt=2
+POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='2') select * from src where key = 97
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@partition_test_partitioned@dt=2
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: alter table partition_test_partitioned add columns (value2 string)
+PREHOOK: type: ALTERTABLE_ADDCOLS
+PREHOOK: Input: default@partition_test_partitioned
+PREHOOK: Output: default@partition_test_partitioned
+POSTHOOK: query: alter table partition_test_partitioned add columns (value2 string)
+POSTHOOK: type: ALTERTABLE_ADDCOLS
+POSTHOOK: Input: default@partition_test_partitioned
+POSTHOOK: Output: default@partition_test_partitioned
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@partition_test_partitioned
+PREHOOK: Input: default@partition_test_partitioned@dt=1
+PREHOOK: Input: default@partition_test_partitioned@dt=2
+#### A masked pattern was here ####
+POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@partition_test_partitioned
+POSTHOOK: Input: default@partition_test_partitioned@dt=1
+POSTHOOK: Input: default@partition_test_partitioned@dt=2
+#### A masked pattern was here ####
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+476	val_238
+476	val_238
+194	val_97
+194	val_97
+PREHOOK: query: select * from partition_test_partitioned where dt is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@partition_test_partitioned@dt=1
+PREHOOK: Input: default@partition_test_partitioned@dt=2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from partition_test_partitioned where dt is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@partition_test_partitioned@dt=1
+POSTHOOK: Input: default@partition_test_partitioned@dt=2
+#### A masked pattern was here ####
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+238	val_238	NULL	1
+238	val_238	NULL	1
+97	val_97	NULL	2
+97	val_97	NULL	2
+PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='3') select key, value, value from src where key = 200
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@partition_test_partitioned@dt=3
+POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='3') select key, value, value from src where key = 200
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@partition_test_partitioned@dt=3
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).value2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select key+key, value, value2 from partition_test_partitioned where dt is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@partition_test_partitioned
+PREHOOK: Input: default@partition_test_partitioned@dt=1
+PREHOOK: Input: default@partition_test_partitioned@dt=2
+PREHOOK: Input: default@partition_test_partitioned@dt=3
+#### A masked pattern was here ####
+POSTHOOK: query: select key+key, value, value2 from partition_test_partitioned where dt is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@partition_test_partitioned
+POSTHOOK: Input:
default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned@dt=2 +POSTHOOK: Input: default@partition_test_partitioned@dt=3 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).value2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +476 val_238 NULL +476 val_238 NULL +194 val_97 NULL +194 val_97 NULL +400 val_200 val_200 +400 val_200 val_200 +PREHOOK: query: select * from partition_test_partitioned where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned@dt=2 +PREHOOK: Input: default@partition_test_partitioned@dt=3 +#### A masked pattern was here #### +POSTHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned@dt=2 +POSTHOOK: Input: default@partition_test_partitioned@dt=3 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).value2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +238 val_238 NULL 1 +238 val_238 NULL 1 +97 val_97 NULL 2 +97 val_97 NULL 2 +200 val_200 val_200 3 +200 val_200 val_200 3 Index: ql/src/test/results/clientpositive/groupby_map_ppr.q.out =================================================================== --- ql/src/test/results/clientpositive/groupby_map_ppr.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/groupby_map_ppr.q.out (working copy) @@ -87,7 +87,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -134,7 +133,6 @@ #### A masked pattern was here #### name 
default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketcontext_7.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketcontext_7.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/bucketcontext_7.q.out (working copy) @@ -175,7 +175,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -183,7 +182,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -225,7 +223,6 @@ partition values: ds 2008-04-09 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -233,7 +230,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -423,7 +419,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -431,7 +426,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -473,7 +467,6 @@ partition values: ds 2008-04-09 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -481,7 +474,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 Index: ql/src/test/results/clientpositive/combine2_hadoop20.q.out =================================================================== --- ql/src/test/results/clientpositive/combine2_hadoop20.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/combine2_hadoop20.q.out (working copy) @@ -250,7 +250,6 @@ #### A masked pattern was here #### name default.combine2 numFiles 1 - numPartitions 8 numRows 1 partition_columns value rawDataSize 2 @@ -296,7 +295,6 @@ #### A masked pattern was here #### name default.combine2 numFiles 1 - numPartitions 8 numRows 3 partition_columns value rawDataSize 3 @@ -342,7 +340,6 @@ #### A masked pattern was here #### name default.combine2 numFiles 1 - numPartitions 8 numRows 1 partition_columns value rawDataSize 1 @@ -388,7 +385,6 @@ #### A masked pattern was here #### name default.combine2 numFiles 1 - numPartitions 8 numRows 1 partition_columns value rawDataSize 1 @@ -434,7 +430,6 @@ #### A masked pattern was here #### name default.combine2 numFiles 1 - numPartitions 8 numRows 3 partition_columns value rawDataSize 3 @@ -480,7 +475,6 @@ #### A masked pattern was here #### name default.combine2 numFiles 1 - numPartitions 8 numRows 1 partition_columns value rawDataSize 1 @@ -526,7 +520,6 @@ #### A masked pattern was here #### name default.combine2 numFiles 1 - numPartitions 8 numRows 1 partition_columns value rawDataSize 1 @@ -572,7 +565,6 @@ #### A masked pattern was here #### name default.combine2 numFiles 1 - numPartitions 8 numRows 1 partition_columns value rawDataSize 2 Index: ql/src/test/results/clientpositive/bucketcontext_2.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketcontext_2.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/bucketcontext_2.q.out (working copy) @@ -150,7 +150,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value 
@@ -158,7 +157,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -200,7 +198,6 @@ partition values: ds 2008-04-09 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -208,7 +205,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -396,7 +392,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -404,7 +399,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -446,7 +440,6 @@ partition values: ds 2008-04-09 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -454,7 +447,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 Index: ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out =================================================================== --- ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out (working copy) @@ -97,7 +97,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -144,7 +143,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketmapjoin7.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketmapjoin7.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/bucketmapjoin7.q.out (working copy) @@ -162,7 +162,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 1 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketmapjoin11.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketmapjoin11.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/bucketmapjoin11.q.out (working copy) @@ -212,7 +212,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 2 numRows 0 partition_columns part rawDataSize 0 @@ -260,7 +259,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 4 - numPartitions 2 numRows 0 partition_columns part rawDataSize 0 @@ -497,7 +495,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 2 numRows 0 partition_columns part rawDataSize 0 @@ -545,7 +542,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 4 - numPartitions 2 numRows 0 partition_columns part rawDataSize 0 Index: ql/src/test/results/clientpositive/join26.q.out =================================================================== --- ql/src/test/results/clientpositive/join26.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/join26.q.out (working copy) @@ -156,7 +156,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: 
ql/src/test/results/clientpositive/bucketmapjoin2.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketmapjoin2.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/bucketmapjoin2.q.out (working copy) @@ -198,7 +198,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part numFiles 4 - numPartitions 1 numRows 0 partition_columns ds rawDataSize 0 @@ -709,7 +708,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_2 numFiles 2 - numPartitions 1 numRows 0 partition_columns ds rawDataSize 0 @@ -1414,7 +1412,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part numFiles 4 - numPartitions 1 numRows 0 partition_columns ds rawDataSize 0 Index: ql/src/test/results/clientpositive/join_map_ppr.q.out =================================================================== --- ql/src/test/results/clientpositive/join_map_ppr.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/join_map_ppr.q.out (working copy) @@ -162,7 +162,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -729,7 +728,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/smb_mapjoin_11.q.out =================================================================== --- ql/src/test/results/clientpositive/smb_mapjoin_11.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/smb_mapjoin_11.q.out (working copy) @@ -136,7 +136,6 @@ partition values: ds 1 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 16 bucket_field_name key columns key,value @@ -144,7 +143,6 @@ #### A masked pattern was here #### name default.test_table1 numFiles 16 - numPartitions 1 numRows 500 partition_columns ds rawDataSize 5312 Index: ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out =================================================================== --- ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out (working copy) @@ -79,7 +79,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -126,7 +125,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -272,7 +270,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -319,7 +316,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -366,7 +362,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -413,7 +408,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/sample1.q.out =================================================================== --- ql/src/test/results/clientpositive/sample1.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/sample1.q.out (working copy) @@ -106,7 +106,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 
0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/partition_wise_fileformat8.q.out =================================================================== --- ql/src/test/results/clientpositive/partition_wise_fileformat8.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/partition_wise_fileformat8.q.out (working copy) @@ -105,3 +105,43 @@ 100 val_100 3 103 val_103 1 103 val_103 1 +PREHOOK: query: select key+key as key, value, dt from partition_test_partitioned where dt is not null order by key, value, dt limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned@dt=2 +PREHOOK: Input: default@partition_test_partitioned@dt=3 +#### A masked pattern was here #### +POSTHOOK: query: select key+key as key, value, dt from partition_test_partitioned where dt is not null order by key, value, dt limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned@dt=2 +POSTHOOK: Input: default@partition_test_partitioned@dt=3 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0.0 val_0 1 +0.0 val_0 1 +0.0 val_0 1 +0.0 val_0 2 +0.0 val_0 2 +0.0 val_0 2 +0.0 val_0 3 +0.0 val_0 3 +0.0 val_0 3 +4.0 val_2 1 +4.0 val_2 2 +4.0 val_2 3 +8.0 val_4 1 +8.0 val_4 2 +8.0 val_4 3 +10.0 val_5 1 +10.0 val_5 1 +10.0 val_5 1 +10.0 val_5 2 +10.0 val_5 2 Index: ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out =================================================================== --- ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out (working copy) @@ -215,7 +215,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 2 numRows 500 partition_columns part rawDataSize 5312 @@ -263,7 +262,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 2 numRows 500 partition_columns part rawDataSize 5312 Index: ql/src/test/results/clientpositive/union22.q.out =================================================================== --- ql/src/test/results/clientpositive/union22.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/union22.q.out (working copy) @@ -194,7 +194,6 @@ #### A masked pattern was here #### name default.dst_union22 numFiles 1 - numPartitions 1 numRows 500 partition_columns ds rawDataSize 11124 @@ -439,7 +438,6 @@ #### A masked pattern was here #### name default.dst_union22_delta 
numFiles 1 - numPartitions 1 numRows 500 partition_columns ds rawDataSize 16936 Index: ql/src/test/results/clientpositive/input_part1.q.out =================================================================== --- ql/src/test/results/clientpositive/input_part1.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/input_part1.q.out (working copy) @@ -102,7 +102,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/partition_wise_fileformat11.q.out =================================================================== --- ql/src/test/results/clientpositive/partition_wise_fileformat11.q.out (revision 0) +++ ql/src/test/results/clientpositive/partition_wise_fileformat11.q.out (working copy) @@ -0,0 +1,123 @@ +PREHOOK: query: -- This tests that the schema can be changed for binary serde data +create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: -- This tests that the schema can be changed for binary serde data +create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@partition_test_partitioned +PREHOOK: query: alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' +PREHOOK: type: ALTERTABLE_SERIALIZER +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Output: default@partition_test_partitioned +POSTHOOK: query: alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' +POSTHOOK: type: ALTERTABLE_SERIALIZER +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Output: default@partition_test_partitioned +PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') select * from src where key = 238 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@partition_test_partitioned@dt=1 +POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') select * from src where key = 238 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@partition_test_partitioned@dt=1 +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from partition_test_partitioned where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +238 val_238 1 +238 val_238 1 +PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Input: 
default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +476.0 val_238 +476.0 val_238 +PREHOOK: query: alter table partition_test_partitioned change key key int +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Output: default@partition_test_partitioned +POSTHOOK: query: alter table partition_test_partitioned change key key int +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Output: default@partition_test_partitioned +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +476 val_238 +476 val_238 +PREHOOK: query: select * from partition_test_partitioned where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +238 val_238 1 +238 val_238 1 +PREHOOK: query: alter table partition_test_partitioned add columns (value2 string) +PREHOOK: type: ALTERTABLE_ADDCOLS +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Output: default@partition_test_partitioned +POSTHOOK: query: alter table partition_test_partitioned add columns (value2 string) +POSTHOOK: type: ALTERTABLE_ADDCOLS +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Output: default@partition_test_partitioned +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned 
PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned +PREHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +476 val_238 +476 val_238 +PREHOOK: query: select * from partition_test_partitioned where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_partitioned@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +238 val_238 NULL 1 +238 val_238 NULL 1 Index: ql/src/test/results/clientpositive/transform_ppr2.q.out =================================================================== --- ql/src/test/results/clientpositive/transform_ppr2.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/transform_ppr2.q.out (working copy) @@ -92,7 +92,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -139,7 +138,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/union_ppr.q.out =================================================================== --- ql/src/test/results/clientpositive/union_ppr.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/union_ppr.q.out (working copy) @@ -152,7 +152,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -199,7 +198,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketcontext_6.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketcontext_6.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/bucketcontext_6.q.out (working copy) @@ -149,7 +149,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -157,7 +156,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -199,7 +197,6 @@ partition values: ds 2008-04-09 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ 
-207,7 +204,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -393,7 +389,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -401,7 +396,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -443,7 +437,6 @@ partition values: ds 2008-04-09 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -451,7 +444,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 2 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketcontext_1.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketcontext_1.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/bucketcontext_1.q.out (working copy) @@ -162,7 +162,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 4 bucket_field_name key columns key,value @@ -170,7 +169,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 4 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -212,7 +210,6 @@ partition values: ds 2008-04-09 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 4 bucket_field_name key columns key,value @@ -220,7 +217,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 4 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -408,7 +404,6 @@ partition values: ds 2008-04-08 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 4 bucket_field_name key columns key,value @@ -416,7 +411,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 4 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 @@ -458,7 +452,6 @@ partition values: ds 2008-04-09 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 4 bucket_field_name key columns key,value @@ -466,7 +459,6 @@ #### A masked pattern was here #### name default.bucket_big numFiles 4 - numPartitions 2 numRows 0 partition_columns ds rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketmapjoin10.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketmapjoin10.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/bucketmapjoin10.q.out (working copy) @@ -199,7 +199,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 2 numRows 0 partition_columns part rawDataSize 0 @@ -247,7 +246,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 3 - numPartitions 2 numRows 0 partition_columns part rawDataSize 0 Index: ql/src/test/results/clientpositive/bucketmapjoin1.q.out =================================================================== --- ql/src/test/results/clientpositive/bucketmapjoin1.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/bucketmapjoin1.q.out (working copy) @@ -982,7 +982,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part numFiles 4 - numPartitions 1 numRows 0 partition_columns ds rawDataSize 0 @@ -1226,7 +1225,6 @@ hdfs directory: true #### A masked pattern was here #### - PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result select /*+mapjoin(a)*/ a.key, a.value, b.value from srcbucket_mapjoin a join 
srcbucket_mapjoin_part b Index: ql/src/test/results/clientpositive/sample10.q.out =================================================================== --- ql/src/test/results/clientpositive/sample10.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/sample10.q.out (working copy) @@ -115,7 +115,6 @@ #### A masked pattern was here #### name default.srcpartbucket numFiles 4 - numPartitions 4 numRows 10 partition_columns ds/hr rawDataSize 60 @@ -164,7 +163,6 @@ #### A masked pattern was here #### name default.srcpartbucket numFiles 4 - numPartitions 4 numRows 10 partition_columns ds/hr rawDataSize 60 @@ -213,7 +211,6 @@ #### A masked pattern was here #### name default.srcpartbucket numFiles 4 - numPartitions 4 numRows 10 partition_columns ds/hr rawDataSize 60 @@ -262,7 +259,6 @@ #### A masked pattern was here #### name default.srcpartbucket numFiles 4 - numPartitions 4 numRows 10 partition_columns ds/hr rawDataSize 60 Index: ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out =================================================================== --- ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out (working copy) @@ -151,7 +151,6 @@ partition values: part 1 properties: - SORTBUCKETCOLSPREFIX TRUE bucket_count 2 bucket_field_name key columns key,value @@ -159,7 +158,6 @@ #### A masked pattern was here #### name default.srcbucket_mapjoin_part_1 numFiles 2 - numPartitions 1 numRows 500 partition_columns part rawDataSize 5312 Index: ql/src/test/results/clientpositive/partition_wise_fileformat10.q.out =================================================================== --- ql/src/test/results/clientpositive/partition_wise_fileformat10.q.out (revision 0) +++ ql/src/test/results/clientpositive/partition_wise_fileformat10.q.out (working copy) @@ -0,0 +1,79 @@ +PREHOOK: query: -- This tests that the schema can be changed for binary serde data +create table prt(key string, value string) partitioned by (dt string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: -- This tests that the schema can be changed for binary serde data +create table prt(key string, value string) partitioned by (dt string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@prt +PREHOOK: query: insert overwrite table prt partition(dt='1') select * from src where key = 238 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@prt@dt=1 +POSTHOOK: query: insert overwrite table prt partition(dt='1') select * from src where key = 238 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@prt@dt=1 +POSTHOOK: Lineage: prt PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: prt PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from prt where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@prt@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from prt where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@prt@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: prt PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: prt PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +238 val_238 1 +238 val_238 1 +PREHOOK: query: select key+key, value from prt where dt is not null +PREHOOK: type: 
QUERY +PREHOOK: Input: default@prt +PREHOOK: Input: default@prt@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select key+key, value from prt where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@prt +POSTHOOK: Input: default@prt@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: prt PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: prt PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +476.0 val_238 +476.0 val_238 +PREHOOK: query: alter table prt add columns (value2 string) +PREHOOK: type: ALTERTABLE_ADDCOLS +PREHOOK: Input: default@prt +PREHOOK: Output: default@prt +POSTHOOK: query: alter table prt add columns (value2 string) +POSTHOOK: type: ALTERTABLE_ADDCOLS +POSTHOOK: Input: default@prt +POSTHOOK: Output: default@prt +POSTHOOK: Lineage: prt PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: prt PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select key+key, value from prt where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@prt +PREHOOK: Input: default@prt@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select key+key, value from prt where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@prt +POSTHOOK: Input: default@prt@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: prt PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: prt PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +476.0 val_238 +476.0 val_238 +PREHOOK: query: select * from prt where dt is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@prt@dt=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from prt where dt is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@prt@dt=1 +#### A masked pattern was here #### +POSTHOOK: Lineage: prt PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: prt PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +238 val_238 NULL 1 +238 val_238 NULL 1 Index: ql/src/test/results/clientpositive/transform_ppr1.q.out =================================================================== --- ql/src/test/results/clientpositive/transform_ppr1.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/transform_ppr1.q.out (working copy) @@ -90,7 +90,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -137,7 +136,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -184,7 +182,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 @@ -231,7 +228,6 @@ #### A masked pattern was here #### name default.srcpart numFiles 1 - numPartitions 4 numRows 0 partition_columns ds/hr rawDataSize 0 Index: ql/src/test/results/clientpositive/ppd_union_view.q.out =================================================================== --- ql/src/test/results/clientpositive/ppd_union_view.q.out (revision 1432949) +++ ql/src/test/results/clientpositive/ppd_union_view.q.out (working copy) @@ -260,7 +260,6 @@ #### 
A masked pattern was here #### name default.t1_mapping numFiles 1 - numPartitions 2 numRows 1 partition_columns ds rawDataSize 12 @@ -306,7 +305,6 @@ #### A masked pattern was here #### name default.t1_old numFiles 1 - numPartitions 2 numRows 1 partition_columns ds rawDataSize 14 @@ -803,7 +801,6 @@ #### A masked pattern was here #### name default.t1_new numFiles 1 - numPartitions 2 numRows 1 partition_columns ds rawDataSize 11 Index: ql/src/test/results/compiler/plan/input2.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input2.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/input2.q.xml (working copy) @@ -1601,10 +1601,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -1621,18 +1617,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -1645,10 +1629,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -1656,10 +1636,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/join3.q.xml =================================================================== --- ql/src/test/results/compiler/plan/join3.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/join3.q.xml (working copy) @@ -182,10 +182,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -202,18 +198,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -226,10 +210,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -237,10 +217,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -354,10 +330,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -374,18 +346,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -398,10 +358,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -409,10 +365,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -526,10 +478,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -546,18 +494,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -570,10 +506,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -581,10 +513,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input4.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input4.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/input4.q.xml (working copy) @@ -182,10 +182,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -202,18 +198,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -226,10 +210,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -237,10 +217,6 @@ location #### A masked 
pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/join5.q.xml =================================================================== --- ql/src/test/results/compiler/plan/join5.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/join5.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -201,10 +177,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -221,18 +193,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -245,10 +205,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -256,10 +212,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input6.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input6.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/input6.q.xml (working copy) @@ -554,10 +554,6 @@ default.src1 - numFiles - 1 - - columns.types string:string @@ -574,18 +570,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -598,10 +582,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 216 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -609,10 +589,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input_testxpath2.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input_testxpath2.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/input_testxpath2.q.xml (working copy) @@ -29,10 +29,6 @@ default.src_thrift - numFiles - 1 - - columns.types @@ -49,18 +45,6 @@ org.apache.thrift.protocol.TBinaryProtocol - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - serialization.class org.apache.hadoop.hive.serde2.thrift.test.Complex @@ -77,10 +61,6 @@ org.apache.hadoop.mapred.SequenceFileInputFormat - totalSize - 1606 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -88,10 +68,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/join7.q.xml =================================================================== --- ql/src/test/results/compiler/plan/join7.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/join7.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern 
was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -201,10 +177,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -221,18 +193,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -245,10 +205,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -256,10 +212,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -373,10 +325,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -393,18 +341,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -417,10 +353,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -428,10 +360,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input8.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input8.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/input8.q.xml (working copy) @@ -29,10 +29,6 @@ default.src1 - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 216 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/union.q.xml =================================================================== --- ql/src/test/results/compiler/plan/union.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/union.q.xml (working copy) @@ -427,10 +427,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -447,18 +443,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -471,10 +455,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -482,10 +462,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -599,10 +575,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -619,18 +591,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -643,10 +603,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -654,10 +610,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/udf4.q.xml =================================================================== --- ql/src/test/results/compiler/plan/udf4.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/udf4.q.xml (working copy) @@ -64,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/udf6.q.xml =================================================================== --- 
ql/src/test/results/compiler/plan/udf6.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/udf6.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input_part1.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input_part1.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/input_part1.q.xml (working copy) @@ -39,7 +39,7 @@ numFiles - 4 + 1 columns.types @@ -66,10 +66,6 @@ 0 - numPartitions - 4 - - partition_columns ds/hr @@ -87,7 +83,7 @@ totalSize - 23248 + 5812 file.outputformat @@ -865,10 +861,6 @@ 0 - numPartitions - 4 - - partition_columns ds/hr Index: ql/src/test/results/compiler/plan/groupby2.q.xml =================================================================== --- ql/src/test/results/compiler/plan/groupby2.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/groupby2.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/groupby4.q.xml =================================================================== --- ql/src/test/results/compiler/plan/groupby4.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/groupby4.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/groupby6.q.xml =================================================================== --- ql/src/test/results/compiler/plan/groupby6.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/groupby6.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/case_sensitivity.q.xml =================================================================== --- ql/src/test/results/compiler/plan/case_sensitivity.q.xml (revision 
1432949) +++ ql/src/test/results/compiler/plan/case_sensitivity.q.xml (working copy) @@ -554,10 +554,6 @@ default.src_thrift - numFiles - 1 - - columns.types @@ -574,18 +570,6 @@ org.apache.thrift.protocol.TBinaryProtocol - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - serialization.class org.apache.hadoop.hive.serde2.thrift.test.Complex @@ -602,10 +586,6 @@ org.apache.hadoop.mapred.SequenceFileInputFormat - totalSize - 1606 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -613,10 +593,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/sample1.q.xml =================================================================== --- ql/src/test/results/compiler/plan/sample1.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/sample1.q.xml (working copy) @@ -39,7 +39,7 @@ numFiles - 4 + 1 columns.types @@ -66,10 +66,6 @@ 0 - numPartitions - 4 - - partition_columns ds/hr @@ -87,7 +83,7 @@ totalSize - 23248 + 5812 file.outputformat @@ -986,10 +982,6 @@ 0 - numPartitions - 4 - - partition_columns ds/hr Index: ql/src/test/results/compiler/plan/sample3.q.xml =================================================================== --- ql/src/test/results/compiler/plan/sample3.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/sample3.q.xml (working copy) @@ -554,10 +554,6 @@ default.srcbucket - numFiles - 2 - - columns.types int:string @@ -578,18 +574,6 @@ 1 - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count 2 @@ -602,10 +586,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 11603 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -613,10 +593,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/sample5.q.xml =================================================================== --- ql/src/test/results/compiler/plan/sample5.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/sample5.q.xml (working copy) @@ -554,10 +554,6 @@ default.srcbucket - numFiles - 2 - - columns.types int:string @@ -578,18 +574,6 @@ 1 - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count 2 @@ -602,10 +586,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 11603 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -613,10 +593,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/sample7.q.xml =================================================================== --- ql/src/test/results/compiler/plan/sample7.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/sample7.q.xml (working copy) @@ -554,10 +554,6 @@ default.srcbucket - numFiles - 2 - - columns.types int:string @@ -578,18 +574,6 @@ 1 - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count 2 @@ -602,10 +586,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 11603 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -613,10 +593,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/cast1.q.xml =================================================================== --- 
ql/src/test/results/compiler/plan/cast1.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/cast1.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input1.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input1.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/input1.q.xml (working copy) @@ -554,10 +554,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -574,18 +570,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -598,10 +582,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -609,10 +589,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/join2.q.xml =================================================================== --- ql/src/test/results/compiler/plan/join2.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/join2.q.xml (working copy) @@ -193,10 +193,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -213,18 +209,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -237,10 +221,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -248,10 +228,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -1282,7 +1258,7 @@ 200 - + 1 @@ -1773,10 +1749,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -1793,18 +1765,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -1817,10 +1777,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -1828,10 +1784,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -1945,10 +1897,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -1965,18 +1913,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -1989,10 +1925,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -2000,10 +1932,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input3.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input3.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/input3.q.xml (working copy) @@ -1978,10 +1978,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -1998,18 +1994,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - 
bucket_count -1 @@ -2022,10 +2006,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -2033,10 +2013,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/join4.q.xml =================================================================== --- ql/src/test/results/compiler/plan/join4.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/join4.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -201,10 +177,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -221,18 +193,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -245,10 +205,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -256,10 +212,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input5.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input5.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/input5.q.xml (working copy) @@ -182,10 +182,6 @@ default.src_thrift - numFiles - 1 - - columns.types @@ -202,18 +198,6 @@ org.apache.thrift.protocol.TBinaryProtocol - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - serialization.class org.apache.hadoop.hive.serde2.thrift.test.Complex @@ -230,10 +214,6 @@ org.apache.hadoop.mapred.SequenceFileInputFormat - totalSize - 1606 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -241,10 +221,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/join6.q.xml =================================================================== --- ql/src/test/results/compiler/plan/join6.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/join6.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -201,10 +177,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -221,18 +193,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -245,10 +205,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -256,10 +212,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked 
pattern was here #### - Index: ql/src/test/results/compiler/plan/input7.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input7.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/input7.q.xml (working copy) @@ -554,10 +554,6 @@ default.src1 - numFiles - 1 - - columns.types string:string @@ -574,18 +570,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -598,10 +582,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 216 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -609,10 +589,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/join8.q.xml =================================================================== --- ql/src/test/results/compiler/plan/join8.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/join8.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -201,10 +177,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -221,18 +193,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -245,10 +205,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -256,10 +212,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input_testsequencefile.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input_testsequencefile.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/input_testsequencefile.q.xml (working copy) @@ -554,10 +554,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -574,18 +570,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -598,10 +582,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -609,10 +589,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input9.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input9.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/input9.q.xml (working copy) @@ -554,10 +554,6 @@ default.src1 - numFiles - 1 - - columns.types string:string @@ -574,18 +570,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -598,10 +582,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 216 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -609,10 +589,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: 
ql/src/test/results/compiler/plan/udf1.q.xml =================================================================== --- ql/src/test/results/compiler/plan/udf1.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/udf1.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input_testxpath.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input_testxpath.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/input_testxpath.q.xml (working copy) @@ -29,10 +29,6 @@ default.src_thrift - numFiles - 1 - - columns.types @@ -49,18 +45,6 @@ org.apache.thrift.protocol.TBinaryProtocol - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - serialization.class org.apache.hadoop.hive.serde2.thrift.test.Complex @@ -77,10 +61,6 @@ org.apache.hadoop.mapred.SequenceFileInputFormat - totalSize - 1606 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -88,10 +68,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/groupby1.q.xml =================================================================== --- ql/src/test/results/compiler/plan/groupby1.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/groupby1.q.xml (working copy) @@ -182,10 +182,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -202,18 +198,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -226,10 +210,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -237,10 +217,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/udf_case.q.xml =================================================================== --- ql/src/test/results/compiler/plan/udf_case.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/udf_case.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/subq.q.xml =================================================================== --- ql/src/test/results/compiler/plan/subq.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/subq.q.xml (working copy) @@ -427,10 +427,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -447,18 +443,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -471,10 +455,6 @@ 
org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -482,10 +462,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/groupby3.q.xml =================================================================== --- ql/src/test/results/compiler/plan/groupby3.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/groupby3.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/groupby5.q.xml =================================================================== --- ql/src/test/results/compiler/plan/groupby5.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/groupby5.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/udf_when.q.xml =================================================================== --- ql/src/test/results/compiler/plan/udf_when.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/udf_when.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/input20.q.xml =================================================================== --- ql/src/test/results/compiler/plan/input20.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/input20.q.xml (working copy) @@ -29,10 +29,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -49,18 +45,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -73,10 +57,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,10 +64,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/sample2.q.xml =================================================================== --- ql/src/test/results/compiler/plan/sample2.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/sample2.q.xml (working copy) @@ -554,10 +554,6 @@ default.srcbucket - numFiles - 2 - - 
columns.types int:string @@ -578,18 +574,6 @@ 1 - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count 2 @@ -602,10 +586,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 11603 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -613,10 +593,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/sample4.q.xml =================================================================== --- ql/src/test/results/compiler/plan/sample4.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/sample4.q.xml (working copy) @@ -554,10 +554,6 @@ default.srcbucket - numFiles - 2 - - columns.types int:string @@ -578,18 +574,6 @@ 1 - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count 2 @@ -602,10 +586,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 11603 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -613,10 +593,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/sample6.q.xml =================================================================== --- ql/src/test/results/compiler/plan/sample6.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/sample6.q.xml (working copy) @@ -554,10 +554,6 @@ default.srcbucket - numFiles - 2 - - columns.types int:string @@ -578,18 +574,6 @@ 1 - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count 2 @@ -602,10 +586,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 11603 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -613,10 +593,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/results/compiler/plan/join1.q.xml =================================================================== --- ql/src/test/results/compiler/plan/join1.q.xml (revision 1432949) +++ ql/src/test/results/compiler/plan/join1.q.xml (working copy) @@ -182,10 +182,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -202,18 +198,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -226,10 +210,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -237,10 +217,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - @@ -354,10 +330,6 @@ default.src - numFiles - 1 - - columns.types string:string @@ -374,18 +346,6 @@ key,value - rawDataSize - 0 - - - numRows - 0 - - - numPartitions - 0 - - bucket_count -1 @@ -398,10 +358,6 @@ org.apache.hadoop.mapred.TextInputFormat - totalSize - 5812 - - file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -409,10 +365,6 @@ location #### A masked pattern was here #### - - transient_lastDdlTime - #### A masked pattern was here #### - Index: ql/src/test/org/apache/hadoop/hive/ql/metadata/TestPartition.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/metadata/TestPartition.java (revision 1432949) +++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestPartition.java (working copy) @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor 
license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.ql.metadata; - -import java.net.URI; -import java.net.URISyntaxException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import junit.framework.TestCase; - -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; - -/** - * Test the partition class. - */ -public class TestPartition extends TestCase { - - private static final String PARTITION_COL = "partcol"; - private static final String PARTITION_VALUE = "value"; - private static final String TABLENAME = "tablename"; - - /** - * Test that the Partition spec is created properly. - */ - public void testPartition() throws HiveException, URISyntaxException { - StorageDescriptor sd = new StorageDescriptor(); - sd.setLocation("partlocation"); - - Partition tp = new Partition(); - tp.setTableName(TABLENAME); - tp.setSd(sd); - - List values = new ArrayList(); - values.add(PARTITION_VALUE); - tp.setValues(values); - - List partCols = new ArrayList(); - partCols.add(new FieldSchema(PARTITION_COL, "string", "")); - - Table tbl = new Table("default", TABLENAME); - tbl.setDataLocation(new URI("tmplocation")); - tbl.setPartCols(partCols); - - Map spec = new org.apache.hadoop.hive.ql.metadata.Partition(tbl, tp).getSpec(); - assertFalse(spec.isEmpty()); - assertEquals(spec.get(PARTITION_COL), PARTITION_VALUE); - } - -} Index: ql/src/test/queries/clientpositive/partition_wise_fileformat8.q =================================================================== --- ql/src/test/queries/clientpositive/partition_wise_fileformat8.q (revision 1432949) +++ ql/src/test/queries/clientpositive/partition_wise_fileformat8.q (working copy) @@ -10,3 +10,4 @@ insert overwrite table partition_test_partitioned partition(dt='3') select * from src; select * from partition_test_partitioned where dt is not null order by key, value, dt limit 20; +select key+key as key, value, dt from partition_test_partitioned where dt is not null order by key, value, dt limit 20; Index: ql/src/test/queries/clientpositive/partition_wise_fileformat12.q =================================================================== --- ql/src/test/queries/clientpositive/partition_wise_fileformat12.q (revision 0) +++ ql/src/test/queries/clientpositive/partition_wise_fileformat12.q (working copy) @@ -0,0 +1,26 @@ +set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat; + +-- This tests that the schema can be changed for binary serde data +create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile; +alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'; +insert overwrite table 
partition_test_partitioned partition(dt='1') select * from src where key = 238; + +select * from partition_test_partitioned where dt is not null; +select key+key, value from partition_test_partitioned where dt is not null; + +alter table partition_test_partitioned change key key int; + +select key+key, value from partition_test_partitioned where dt is not null; +select * from partition_test_partitioned where dt is not null; + +insert overwrite table partition_test_partitioned partition(dt='2') select * from src where key = 97; + +alter table partition_test_partitioned add columns (value2 string); + +select key+key, value from partition_test_partitioned where dt is not null; +select * from partition_test_partitioned where dt is not null; + +insert overwrite table partition_test_partitioned partition(dt='3') select key, value, value from src where key = 200; + +select key+key, value, value2 from partition_test_partitioned where dt is not null; +select * from partition_test_partitioned where dt is not null; Index: ql/src/test/queries/clientpositive/partition_wise_fileformat9.q =================================================================== --- ql/src/test/queries/clientpositive/partition_wise_fileformat9.q (revision 1432949) +++ ql/src/test/queries/clientpositive/partition_wise_fileformat9.q (working copy) @@ -8,3 +8,5 @@ insert overwrite table partition_test_partitioned partition(dt='2') select * from src; select * from partition_test_partitioned where dt is not null order by key, value, dt limit 20; +select key+key as key, value, dt from partition_test_partitioned where dt is not null order by key, value, dt limit 20; + Index: ql/src/test/queries/clientpositive/partition_wise_fileformat10.q =================================================================== --- ql/src/test/queries/clientpositive/partition_wise_fileformat10.q (revision 0) +++ ql/src/test/queries/clientpositive/partition_wise_fileformat10.q (working copy) @@ -0,0 +1,13 @@ +set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat; + +-- This tests that the schema can be changed for text format data using the default serde +create table prt(key string, value string) partitioned by (dt string); +insert overwrite table prt partition(dt='1') select * from src where key = 238; + +select * from prt where dt is not null; +select key+key, value from prt where dt is not null; + +alter table prt add columns (value2 string); + +select key+key, value from prt where dt is not null; +select * from prt where dt is not null; Index: ql/src/test/queries/clientpositive/partition_wise_fileformat11.q =================================================================== --- ql/src/test/queries/clientpositive/partition_wise_fileformat11.q (revision 0) +++ ql/src/test/queries/clientpositive/partition_wise_fileformat11.q (working copy) @@ -0,0 +1,19 @@ +set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat; + +-- This tests that the schema can be changed for binary serde data +create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile; +alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'; +insert overwrite table partition_test_partitioned partition(dt='1') select * from src where key = 238; + +select * from partition_test_partitioned where dt is not null; +select key+key, value from partition_test_partitioned where dt is not null; + +alter table partition_test_partitioned change key key int; + +select key+key, value from
partition_test_partitioned where dt is not null; +select * from partition_test_partitioned where dt is not null; + +alter table partition_test_partitioned add columns (value2 string); + +select key+key, value from partition_test_partitioned where dt is not null; +select * from partition_test_partitioned where dt is not null; Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java (revision 1432949) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java (working copy) @@ -826,7 +826,12 @@ partDir.add(p); try { - partDesc.add(Utilities.getPartitionDescFromTableDesc(tblDesc, part)); + if (part.getTable().isPartitioned()) { + partDesc.add(Utilities.getPartitionDesc(part)); + } + else { + partDesc.add(Utilities.getPartitionDescFromTableDesc(tblDesc, part)); + } } catch (HiveException e) { LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); throw new SemanticException(e.getMessage(), e); Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (revision 1432949) +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (working copy) @@ -214,7 +214,7 @@ getInputFormatClass(); // This will set up field: outputFormatClass getOutputFormatClass(); - + getDeserializer(); } public String getName() { @@ -276,6 +276,10 @@ return MetaStoreUtils.getSchema(tPartition, table.getTTable()); } + public Properties getSchemaFromPartitionSchema() { + return MetaStoreUtils.getPartitionSchema(tPartition, table.getTTable()); + } + public Properties getSchemaFromTableSchema(Properties tblSchema) { return MetaStoreUtils.getPartSchemaFromTableSchema(tPartition.getSd(), table.getTTable().getSd(), tPartition.getParameters(), table.getDbName(), table.getTableName(), table.getPartitionKeys(), Index: ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java (revision 1432949) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java (working copy) @@ -38,6 +38,7 @@ import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.PartitionDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.TableScanDesc; import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.serde2.Deserializer; @@ -45,6 +46,8 @@ import org.apache.hadoop.hive.serde2.SerDeStats; import org.apache.hadoop.hive.serde2.SerDeUtils; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; @@ -78,7 +81,9 @@ private transient Writable[] vcValues; private transient List vcs; private transient Object[] rowWithPartAndVC; - private transient StructObjectInspector rowObjectInspector; + private transient 
StructObjectInspector tblRowObjectInspector; + // convert from partition to table schema + private transient Converter partTblObjectInspectorConverter; private transient boolean isPartitioned; private transient boolean hasVC; private Map opCtxMap; @@ -141,15 +146,16 @@ } private static class MapOpCtx { - boolean isPartitioned; - StructObjectInspector rawRowObjectInspector; // without partition - StructObjectInspector partObjectInspector; // partition - StructObjectInspector rowObjectInspector; - Object[] rowWithPart; - Object[] rowWithPartAndVC; - Deserializer deserializer; - public String tableName; - public String partName; + private final boolean isPartitioned; + private final StructObjectInspector tblRawRowObjectInspector; // without partition + private final StructObjectInspector partObjectInspector; // partition + private StructObjectInspector rowObjectInspector; + private final Converter partTblObjectInspectorConverter; + private final Object[] rowWithPart; + private Object[] rowWithPartAndVC; + private final Deserializer deserializer; + private String tableName; + private String partName; /** * @param isPartitioned @@ -158,18 +164,20 @@ */ public MapOpCtx(boolean isPartitioned, StructObjectInspector rowObjectInspector, - StructObjectInspector rawRowObjectInspector, + StructObjectInspector tblRawRowObjectInspector, StructObjectInspector partObjectInspector, Object[] rowWithPart, Object[] rowWithPartAndVC, - Deserializer deserializer) { + Deserializer deserializer, + Converter partTblObjectInspectorConverter) { this.isPartitioned = isPartitioned; this.rowObjectInspector = rowObjectInspector; - this.rawRowObjectInspector = rawRowObjectInspector; + this.tblRawRowObjectInspector = tblRawRowObjectInspector; this.partObjectInspector = partObjectInspector; this.rowWithPart = rowWithPart; this.rowWithPartAndVC = rowWithPartAndVC; this.deserializer = deserializer; + this.partTblObjectInspectorConverter = partTblObjectInspectorConverter; } /** @@ -186,6 +194,10 @@ return rowObjectInspector; } + public StructObjectInspector getTblRawRowObjectInspector() { + return tblRawRowObjectInspector; + } + /** * @return the rowWithPart */ @@ -206,6 +218,10 @@ public Deserializer getDeserializer() { return deserializer; } + + public Converter getPartTblObjectInspectorConverter() { + return partTblObjectInspectorConverter; + } } /** @@ -225,38 +241,45 @@ } private MapOpCtx initObjectInspector(MapredWork conf, - Configuration hconf, String onefile) throws HiveException, + Configuration hconf, String onefile, Map convertedOI) + throws HiveException, ClassNotFoundException, InstantiationException, IllegalAccessException, SerDeException { - PartitionDesc td = conf.getPathToPartitionInfo().get(onefile); - LinkedHashMap partSpec = td.getPartSpec(); - Properties tblProps = td.getProperties(); + PartitionDesc pd = conf.getPathToPartitionInfo().get(onefile); + LinkedHashMap partSpec = pd.getPartSpec(); + Properties partProps = + (pd.getPartSpec() == null || pd.getPartSpec().isEmpty()) ? 
+ pd.getTableDesc().getProperties() : pd.getProperties(); - Class sdclass = td.getDeserializerClass(); + Class sdclass = pd.getDeserializerClass(); if (sdclass == null) { - String className = td.getSerdeClassName(); + String className = pd.getSerdeClassName(); if ((className == "") || (className == null)) { throw new HiveException( "SerDe class or the SerDe class name is not set for table: " - + td.getProperties().getProperty("name")); + + pd.getProperties().getProperty("name")); } sdclass = hconf.getClassByName(className); } - String tableName = String.valueOf(tblProps.getProperty("name")); + String tableName = String.valueOf(partProps.getProperty("name")); String partName = String.valueOf(partSpec); - // HiveConf.setVar(hconf, HiveConf.ConfVars.HIVETABLENAME, tableName); - // HiveConf.setVar(hconf, HiveConf.ConfVars.HIVEPARTITIONNAME, partName); - Deserializer deserializer = (Deserializer) sdclass.newInstance(); - deserializer.initialize(hconf, tblProps); - StructObjectInspector rawRowObjectInspector = (StructObjectInspector) deserializer + Deserializer partDeserializer = (Deserializer) sdclass.newInstance(); + partDeserializer.initialize(hconf, partProps); + StructObjectInspector partRawRowObjectInspector = (StructObjectInspector) partDeserializer .getObjectInspector(); + StructObjectInspector tblRawRowObjectInspector = convertedOI.get(pd.getTableDesc()); + + partTblObjectInspectorConverter = + ObjectInspectorConverters.getConverter(partRawRowObjectInspector, + tblRawRowObjectInspector); + MapOpCtx opCtx = null; // Next check if this table has partitions and if so // get the list of partition names as well as allocate // the serdes for the partition columns - String pcols = tblProps + String pcols = partProps .getProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS); // Log LOG = LogFactory.getLog(MapOperator.class.getName()); if (pcols != null && pcols.length() > 0) { @@ -285,16 +308,16 @@ rowWithPart[1] = partValues; StructObjectInspector rowObjectInspector = ObjectInspectorFactory .getUnionStructObjectInspector(Arrays - .asList(new StructObjectInspector[] {rawRowObjectInspector, partObjectInspector})); + .asList(new StructObjectInspector[] {tblRawRowObjectInspector, partObjectInspector})); // LOG.info("dump " + tableName + " " + partName + " " + // rowObjectInspector.getTypeName()); - opCtx = new MapOpCtx(true, rowObjectInspector, rawRowObjectInspector, partObjectInspector, - rowWithPart, null, deserializer); + opCtx = new MapOpCtx(true, rowObjectInspector, tblRawRowObjectInspector, partObjectInspector, + rowWithPart, null, partDeserializer, partTblObjectInspectorConverter); } else { // LOG.info("dump2 " + tableName + " " + partName + " " + // rowObjectInspector.getTypeName()); - opCtx = new MapOpCtx(false, rawRowObjectInspector, rawRowObjectInspector, null, null, - null, deserializer); + opCtx = new MapOpCtx(false, tblRawRowObjectInspector, tblRawRowObjectInspector, null, null, + null, partDeserializer, partTblObjectInspectorConverter); } opCtx.tableName = tableName; opCtx.partName = partName; @@ -312,7 +335,8 @@ isPartitioned = opCtxMap.get(inp).isPartitioned(); rowWithPart = opCtxMap.get(inp).getRowWithPart(); rowWithPartAndVC = opCtxMap.get(inp).getRowWithPartAndVC(); - rowObjectInspector = opCtxMap.get(inp).getRowObjectInspector(); + tblRowObjectInspector = opCtxMap.get(inp).getRowObjectInspector(); + partTblObjectInspectorConverter = opCtxMap.get(inp).getPartTblObjectInspectorConverter(); if (listInputPaths.contains(inp)) { return; 
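// The MapOpCtx cached per input path above already carries the deserializer,
// the table-side row inspector and the partition-to-table converter, so a
// repeated switch to an already-seen path only swaps these references.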
} @@ -320,7 +344,8 @@ listInputPaths.add(inp); if (op instanceof TableScanOperator) { - StructObjectInspector rawRowObjectInspector = opCtxMap.get(inp).rawRowObjectInspector; + StructObjectInspector tblRawRowObjectInspector = + opCtxMap.get(inp).getTblRawRowObjectInspector(); StructObjectInspector partObjectInspector = opCtxMap.get(inp).partObjectInspector; TableScanOperator tsOp = (TableScanOperator) op; TableScanDesc tsDesc = tsOp.getConf(); @@ -348,22 +373,92 @@ this.rowWithPartAndVC = new Object[2]; } if (partObjectInspector == null) { - this.rowObjectInspector = ObjectInspectorFactory.getUnionStructObjectInspector(Arrays + this.tblRowObjectInspector = ObjectInspectorFactory.getUnionStructObjectInspector(Arrays .asList(new StructObjectInspector[] { - rowObjectInspector, vcStructObjectInspector})); + tblRowObjectInspector, vcStructObjectInspector})); } else { - this.rowObjectInspector = ObjectInspectorFactory.getUnionStructObjectInspector(Arrays + this.tblRowObjectInspector = ObjectInspectorFactory.getUnionStructObjectInspector(Arrays .asList(new StructObjectInspector[] { - rawRowObjectInspector, partObjectInspector, + tblRawRowObjectInspector, partObjectInspector, vcStructObjectInspector})); } - opCtxMap.get(inp).rowObjectInspector = this.rowObjectInspector; + opCtxMap.get(inp).rowObjectInspector = this.tblRowObjectInspector; opCtxMap.get(inp).rowWithPartAndVC = this.rowWithPartAndVC; } } } } + private Map getConvertedOI(Configuration hconf) + throws HiveException { + Map tableDescOI = + new HashMap(); + Set identityConverterTableDesc = new HashSet(); + try + { + for (String onefile : conf.getPathToAliases().keySet()) { + PartitionDesc pd = conf.getPathToPartitionInfo().get(onefile); + TableDesc tableDesc = pd.getTableDesc(); + Properties tblProps = tableDesc.getProperties(); + // If the partition does not exist, use table properties + Properties partProps = + (pd.getPartSpec() == null || pd.getPartSpec().isEmpty()) ? 
+ tblProps : pd.getProperties(); + + Class sdclass = pd.getDeserializerClass(); + if (sdclass == null) { + String className = pd.getSerdeClassName(); + if ((className == null) || className.equals("")) { + throw new HiveException( + "SerDe class or the SerDe class name is not set for table: " + + pd.getProperties().getProperty("name")); + } + sdclass = hconf.getClassByName(className); + } + + Deserializer partDeserializer = (Deserializer) sdclass.newInstance(); + partDeserializer.initialize(hconf, partProps); + StructObjectInspector partRawRowObjectInspector = (StructObjectInspector) partDeserializer + .getObjectInspector(); + + StructObjectInspector tblRawRowObjectInspector = tableDescOI.get(tableDesc); + if ((tblRawRowObjectInspector == null) || + (identityConverterTableDesc.contains(tableDesc))) { + sdclass = tableDesc.getDeserializerClass(); + if (sdclass == null) { + String className = tableDesc.getSerdeClassName(); + if ((className == null) || className.equals("")) { + throw new HiveException( + "SerDe class or the SerDe class name is not set for table: " + + tableDesc.getProperties().getProperty("name")); + } + sdclass = hconf.getClassByName(className); + } + Deserializer tblDeserializer = (Deserializer) sdclass.newInstance(); + tblDeserializer.initialize(hconf, tblProps); + tblRawRowObjectInspector = + (StructObjectInspector) ObjectInspectorConverters.getConvertedOI( + partRawRowObjectInspector, + (StructObjectInspector) tblDeserializer.getObjectInspector()); + + if (identityConverterTableDesc.contains(tableDesc)) { + if (partRawRowObjectInspector != tblRawRowObjectInspector) { + identityConverterTableDesc.remove(tableDesc); + } + } + else if (partRawRowObjectInspector == tblRawRowObjectInspector) { + identityConverterTableDesc.add(tableDesc); + } + + tableDescOI.put(tableDesc, tblRawRowObjectInspector); + } + } + } catch (Exception e) { + throw new HiveException(e); + } + return tableDescOI; + } + public void setChildren(Configuration hconf) throws HiveException { Path fpath = new Path((new Path(HiveConf.getVar(hconf, @@ -375,10 +470,10 @@ operatorToPaths = new HashMap, ArrayList>(); statsMap.put(Counter.DESERIALIZE_ERRORS, deserialize_error_count); - + Map convertedOI = getConvertedOI(hconf); try { for (String onefile : conf.getPathToAliases().keySet()) { - MapOpCtx opCtx = initObjectInspector(conf, hconf, onefile); + MapOpCtx opCtx = initObjectInspector(conf, hconf, onefile, convertedOI); Path onepath = new Path(new Path(onefile).toUri().getPath()); List aliases = conf.getPathToAliases().get(onefile); @@ -514,16 +609,18 @@ Object row = null; try { if (this.hasVC) { - this.rowWithPartAndVC[0] = deserializer.deserialize(value); + this.rowWithPartAndVC[0] = + partTblObjectInspectorConverter.convert(deserializer.deserialize(value)); int vcPos = isPartitioned ? 2 : 1; if (context != null) { populateVirtualColumnValues(context, vcs, vcValues, deserializer); } this.rowWithPartAndVC[vcPos] = this.vcValues; } else if (!isPartitioned) { - row = deserializer.deserialize((Writable) value); + row = partTblObjectInspectorConverter.convert(deserializer.deserialize((Writable) value)); } else { - rowWithPart[0] = deserializer.deserialize((Writable) value); + rowWithPart[0] = + partTblObjectInspectorConverter.convert(deserializer.deserialize((Writable) value)); } } catch (Exception e) { // Serialize the row and output.
@@ -542,22 +639,22 @@ try { if (this.hasVC) { - forward(this.rowWithPartAndVC, this.rowObjectInspector); + forward(this.rowWithPartAndVC, this.tblRowObjectInspector); } else if (!isPartitioned) { - forward(row, rowObjectInspector); + forward(row, tblRowObjectInspector); } else { - forward(rowWithPart, rowObjectInspector); + forward(rowWithPart, tblRowObjectInspector); } } catch (Exception e) { // Serialize the row and output the error message. String rowString; try { if (this.hasVC) { - rowString = SerDeUtils.getJSONString(rowWithPartAndVC, rowObjectInspector); + rowString = SerDeUtils.getJSONString(rowWithPartAndVC, tblRowObjectInspector); } else if (!isPartitioned) { - rowString = SerDeUtils.getJSONString(row, rowObjectInspector); + rowString = SerDeUtils.getJSONString(row, tblRowObjectInspector); } else { - rowString = SerDeUtils.getJSONString(rowWithPart, rowObjectInspector); + rowString = SerDeUtils.getJSONString(rowWithPart, tblRowObjectInspector); } } catch (Exception e2) { rowString = "[Error getting row data with exception " + Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapper.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapper.java (revision 1432949) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapper.java (working copy) @@ -109,8 +109,6 @@ dummyOp.setExecContext(execContext); dummyOp.initialize(jc,null); } - - } catch (Throwable e) { abort = true; if (e instanceof OutOfMemoryError) { Index: ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (revision 1432949) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (working copy) @@ -50,6 +50,8 @@ import org.apache.hadoop.hive.serde2.objectinspector.DelegatedObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; @@ -92,6 +94,9 @@ private transient Writable value; private transient Writable[] vcValues; private transient Deserializer serde; + private transient Deserializer tblSerde; + Converter partTblObjectInspectorConverter; + private transient Iterator iterPath; private transient Iterator iterPartDesc; private transient Path currPath; @@ -223,17 +228,29 @@ private StructObjectInspector setTableDesc(TableDesc table) throws Exception { Deserializer serde = table.getDeserializerClass().newInstance(); serde.initialize(job, table.getProperties()); - return createRowInspector(getCurrent(serde)); + return createRowInspector(getCurrent(serde.getObjectInspector())); } - private StructObjectInspector setPrtnDesc(PartitionDesc partition) throws Exception { + private StructObjectInspector setPrtnDesc(PartitionDesc partition, ObjectInspector outputOI) + throws Exception { Deserializer serde = partition.getDeserializerClass().newInstance(); serde.initialize(job, partition.getProperties()); + String pcols = partition.getTableDesc().getProperties().getProperty( 
org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS); String[] partKeys = pcols.trim().split("/"); row[1] = createPartValue(partKeys, partition.getPartSpec()); - return createRowInspector(getCurrent(serde), partKeys); + + if (outputOI == null) { + Deserializer tblSerde = partition.getTableDesc().getDeserializerClass().newInstance(); + tblSerde.initialize(job, partition.getTableDesc().getProperties()); + + outputOI = ObjectInspectorConverters.getConvertedOI( + serde.getObjectInspector(), + tblSerde.getObjectInspector()); + } + + return createRowInspector(getCurrent(outputOI), partKeys); } private StructObjectInspector setPrtnDesc(TableDesc table) throws Exception { @@ -243,11 +260,10 @@ org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS); String[] partKeys = pcols.trim().split("/"); row[1] = null; - return createRowInspector(getCurrent(serde), partKeys); + return createRowInspector(getCurrent(serde.getObjectInspector()), partKeys); } - private StructObjectInspector getCurrent(Deserializer serde) throws SerDeException { - ObjectInspector current = serde.getObjectInspector(); + private StructObjectInspector getCurrent(ObjectInspector current) throws SerDeException { if (objectInspector != null) { current = DelegatedObjectInspectorFactory.reset(objectInspector, current); } else { @@ -384,6 +400,21 @@ serde = tmp.getDeserializerClass().newInstance(); serde.initialize(job, tmp.getProperties()); + if (currTbl != null) { + tblSerde = serde; + } + else { + tblSerde = currPart.getTableDesc().getDeserializerClass().newInstance(); + tblSerde.initialize(job, currPart.getTableDesc().getProperties()); + } + + ObjectInspector outputOI = ObjectInspectorConverters.getConvertedOI( + serde.getObjectInspector(), + tblSerde.getObjectInspector()); + + partTblObjectInspectorConverter = ObjectInspectorConverters.getConverter( + serde.getObjectInspector(), outputOI); + if (LOG.isDebugEnabled()) { LOG.debug("Creating fetchTask with deserializer typeinfo: " + serde.getObjectInspector().getTypeName()); @@ -391,7 +422,7 @@ } if (currPart != null) { - setPrtnDesc(currPart); + setPrtnDesc(currPart, outputOI); } } @@ -503,14 +534,15 @@ vcValues = MapOperator.populateVirtualColumnValues(context, vcCols, vcValues, serde); row[isPartitioned ? 
2 : 1] = vcValues; } - row[0] = serde.deserialize(value); + row[0] = partTblObjectInspectorConverter.convert(serde.deserialize(value)); + if (hasVC || isPartitioned) { inspectable.o = row; inspectable.oi = rowObjectInspector; return inspectable; } inspectable.o = row[0]; - inspectable.oi = serde.getObjectInspector(); + inspectable.oi = tblSerde.getObjectInspector(); return inspectable; } else { currRecReader.close(); @@ -575,7 +607,7 @@ if (listParts == null || listParts.isEmpty()) { return setPrtnDesc(work.getTblDesc()); } - return setPrtnDesc(listParts.get(0)); + return setPrtnDesc(listParts.get(0), null); } catch (Exception e) { throw new HiveException("Failed with exception " + e.getMessage() + org.apache.hadoop.util.StringUtils.stringifyException(e)); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java (revision 1432949) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java (working copy) @@ -30,6 +30,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.persistence.RowContainer; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -40,7 +41,6 @@ import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.SMBJoinDesc; import org.apache.hadoop.hive.ql.plan.api.OperatorType; -import org.apache.hadoop.hive.ql.util.ObjectPair; import org.apache.hadoop.hive.serde2.ColumnProjectionUtils; import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; Index: ql/src/java/org/apache/hadoop/hive/ql/util/ObjectPair.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/util/ObjectPair.java (revision 1432949) +++ ql/src/java/org/apache/hadoop/hive/ql/util/ObjectPair.java (working copy) @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.util; - -public class ObjectPair { - private F first; - private S second; - - public ObjectPair() {} - - public ObjectPair(F first, S second) { - this.first = first; - this.second = second; - } - - public F getFirst() { - return first; - } - - public void setFirst(F first) { - this.first = first; - } - - public S getSecond() { - return second; - } - - public void setSecond(S second) { - this.second = second; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java (revision 1432949) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java (working copy) @@ -87,7 +87,7 @@ public PartitionDesc(final org.apache.hadoop.hive.ql.metadata.Partition part) throws HiveException { tableDesc = Utilities.getTableDesc(part.getTable()); - properties = part.getSchema(); + properties = part.getSchemaFromPartitionSchema(); partSpec = part.getSpec(); deserializerClass = part.getDeserializer(properties).getClass(); inputFileFormatClass = part.getInputFormatClass(); Index: ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java (revision 1432949) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java (working copy) @@ -20,9 +20,9 @@ import java.io.Serializable; import java.util.Enumeration; -import java.util.Properties; import java.util.LinkedHashMap; import java.util.Map; +import java.util.Properties; import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; @@ -149,7 +149,7 @@ org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE) != null); } - + @Override public Object clone() { TableDesc ret = new TableDesc(); @@ -170,4 +170,42 @@ } return ret; } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((deserializerClass == null) ? 0 : deserializerClass.hashCode()); + result = prime * result + + ((inputFileFormatClass == null) ? 0 : inputFileFormatClass.hashCode()); + result = prime * result + + ((outputFileFormatClass == null) ? 0 : outputFileFormatClass.hashCode()); + result = prime * result + ((properties == null) ? 0 : properties.hashCode()); + result = prime * result + ((serdeClassName == null) ? 0 : serdeClassName.hashCode()); + result = prime * result + ((jobProperties == null) ? 0 : jobProperties.hashCode()); + return result; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof TableDesc)) { + return false; + } + + TableDesc target = (TableDesc) o; + boolean ret = true; + ret = ret && (deserializerClass == null ? target.deserializerClass == null : + deserializerClass.equals(target.deserializerClass)); + ret = ret && (inputFileFormatClass == null ? target.inputFileFormatClass == null : + inputFileFormatClass.equals(target.inputFileFormatClass)); + ret = ret && (outputFileFormatClass == null ? target.outputFileFormatClass == null : + outputFileFormatClass.equals(target.outputFileFormatClass)); + ret = ret && (properties == null ? target.properties == null : + properties.equals(target.properties)); + ret = ret && (serdeClassName == null ? target.serdeClassName == null : + serdeClassName.equals(target.serdeClassName)); + ret = ret && (jobProperties == null ? 
target.jobProperties == null : + jobProperties.equals(target.jobProperties)); + return ret; + } } Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (revision 1432949) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (working copy) @@ -41,6 +41,7 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.JavaUtils; +import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.TableType; @@ -165,7 +166,6 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFHash; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr; import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF; -import org.apache.hadoop.hive.ql.util.ObjectPair; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
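
For reference, a minimal sketch of the conversion mechanism this patch wires into MapOperator and FetchOperator: rows are deserialized with the partition's own schema and then converted to the table's current schema, which is what lets the partition_wise_fileformat tests above keep working after "alter table ... change key key int". The class below is illustrative only (it is not part of the patch and its name is made up); it exercises the same ObjectInspectorConverters call the patch issues once per input path.

import java.util.Arrays;

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class PartitionToTableConversionSketch {
  public static void main(String[] args) {
    // Schema the partition was written with: key and value are both strings.
    StructObjectInspector partOI = ObjectInspectorFactory.getStandardStructObjectInspector(
        Arrays.asList("key", "value"),
        Arrays.<ObjectInspector>asList(
            PrimitiveObjectInspectorFactory.javaStringObjectInspector,
            PrimitiveObjectInspectorFactory.javaStringObjectInspector));

    // Current table schema: "alter table ... change key key int" has run since.
    StructObjectInspector tblOI = ObjectInspectorFactory.getStandardStructObjectInspector(
        Arrays.asList("key", "value"),
        Arrays.<ObjectInspector>asList(
            PrimitiveObjectInspectorFactory.javaIntObjectInspector,
            PrimitiveObjectInspectorFactory.javaStringObjectInspector));

    // Rows deserialized with the partition schema are converted field by
    // field to the table schema before being forwarded down the operator tree.
    Converter conv = ObjectInspectorConverters.getConverter(partOI, tblOI);

    // ["238", "val_238"] under the partition view becomes [238, "val_238"]
    // under the table view; the string key is parsed into an int.
    System.out.println(conv.convert(Arrays.asList("238", "val_238")));
  }
}

The TableDesc.hashCode()/equals() methods added above serve the same mechanism: getConvertedOI in MapOperator caches one converted table-side inspector per TableDesc in a HashMap, which only works if TableDesc instances compare by value.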