v40x>@ji&V@>ewdt`n39z8
oh%xZL1S3lXqjkj`Zyzr&A1@{b4t6H*iaEw6Oia!UERu}c0Ep33{Qv*}
literal 0
HcmV?d00001
diff --git a/data/files/test_v6dot0_uncompressed.rc b/data/files/test_v6dot0_uncompressed.rc
new file mode 100755
index 0000000000000000000000000000000000000000..1232d28503da91e6aa4882b33fc04659af0e3e11
GIT binary patch
literal 190
zcmWG`4P?{JFG|--EJ#ewNY%?oOv%qL(96gyOVumP(aX%&3vzbL%t=-8POWq*O-oBH
z(kDiBSYl3TDnva42r$ZkO#~WOl$-`MPA@q>r!+TDFRwH=DYb~v=(gw9#C&xRd)Kw*
lJofK|&j7VZ12I1cv#_zTFfuVRfvJCtiAhO7WM~8gh5(^-IaL4v
literal 0
HcmV?d00001
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java b/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
index eb5305b..49d39e7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
@@ -73,7 +73,7 @@ import org.apache.hadoop.util.ReflectionUtils;
4 * 1024 * 1024) .
*
* RCFile provides {@link Writer}, {@link Reader} and classes for
- * writing, reading respectively.
+ * writing and reading, respectively. These classes are not thread-safe.
*
*
*
@@ -86,9 +86,8 @@ import org.apache.hadoop.util.ReflectionUtils;
*
*
* RCFile compresses values in a more fine-grained manner than record level
- * compression. However, It currently does not support compress the key part
- * yet. The actual compression algorithm used to compress key and/or values can
- * be specified by using the appropriate {@link CompressionCodec}.
+ * compression. The actual compression algorithm used to compress key and/or
+ * values can be specified by using the appropriate {@link CompressionCodec}.
*
*
*
@@ -111,37 +110,59 @@ import org.apache.hadoop.util.ReflectionUtils;
*
* compression codec - CompressionCodec class which is used for
* compression of keys and/or values (if compression is enabled).
* metadata - {@link Metadata} for this file.
+ *
+ * - minor version - The minor version of the file format.
+ * - number of columns - The number of columns in the data.
+ *
* sync - A sync marker to denote end of the header.
*
*
* RCFile Format
*
- * - Header
- * - Record
- *
- * - Key part
- *
- * - Record length in bytes
- * - Key length in bytes
- * - Number_of_rows_in_this_record(vint)
- * - Column_1_ondisk_length(vint)
- * - Column_1_row_1_value_plain_length
- * - Column_1_row_2_value_plain_length
- * - ...
- * - Column_2_ondisk_length(vint)
- * - Column_2_row_1_value_plain_length
- * - Column_2_row_2_value_plain_length
- * - ...
- *
- *
- *
- * - Value part
- *
- * - Compressed or plain data of [column_1_row_1_value,
- * column_1_row_2_value,....]
- * - Compressed or plain data of [column_2_row_1_value,
- * column_2_row_2_value,....]
- *
- *
+ * - Header
+ *
+ * - Records, one or more
+ *
+ * - Record Header
+ *
+ * - Record Body length in bytes
+ *
+ * - Key length in bytes
+ *
+ * - Record Body
+ *
+ * - Key
+ *
+ * - Key contents' plain length in bytes
+ *
+ * - Key contents, plain or compressed
+ *
+ * - Number_of_rows_in_this_record(vint)
+ *
+ * - Column_1_ondisk_length(vint)
+ *
+ * - Column_1_plain_length(vint)
+ *
+ * - RL-encoded Column_1_row_1_value_plain_length
+ *
+ * - RL-encoded Column_1_row_2_value_plain_length
+ *
+ * - ...
+ *
+ * - Column_2_ondisk_length(vint)
+ *
+ * - Column_2_plain_length(vint)
+ *
+ * - RL-encoded Column_2_row_1_value_plain_length
+ *
+ * - RL-encoded Column_2_row_2_value_plain_length
+ *
+ * - ...
+ *
+ *
+ * - Value
+ *
+ * - Compressed or plain data of [column_1_row_1_value,
+ * column_1_row_2_value,....]
+ *
+ * - Compressed or plain data of [column_2_row_1_value,
+ * column_2_row_2_value,....]
+ *
+ *
+ *
+ * - Sync block
+ *
+ * - sync escape
+ *
+ * - sync
+ *
+ * - Records, one or more
+ *
+ * - ...
*
*
*/
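
[Editor's note] For illustration only, not part of the patch: a minimal sketch of how a reader could walk the 6.1 record header documented above. The stream name `in` and the method itself are hypothetical.

    import java.io.DataInputStream;
    import java.io.IOException;

    class RecordHeaderSketch {
      // Walks the per-record length fields in the order the new format writes them.
      // Assumes 'in' is positioned at the start of a record, after any sync block.
      static void readRecordHeader(DataInputStream in) throws IOException {
        int recordBodyLength = in.readInt();   // key portion + value portion, in bytes
        int keyPortionLength = in.readInt();   // 4-byte plain-length field + key contents
        int plainKeyLength = in.readInt();     // key contents' size before compression
        int keyContentsOnDisk = keyPortionLength - 4;           // plain or compressed bytes
        int valueLength = recordBodyLength - keyPortionLength;  // remaining bytes: the value
        // ... read keyContentsOnDisk bytes of key, then valueLength bytes of value
      }
    }
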
@@ -153,6 +174,8 @@ public class RCFile {
public static final String COLUMN_NUMBER_METADATA_STR = "hive.io.rcfile.column.number";
+ public static final String RCFILE_VERSION_METADATA_STR = "hive.io.rcfile.minor.version";
+
public static final String COLUMN_NUMBER_CONF_STR = "hive.io.rcfile.column.number.conf";
/*
@@ -164,6 +187,8 @@ public class RCFile {
(byte) 'S', (byte) 'E', (byte) 'Q', VERSION_WITH_METADATA
};
+ private static final byte RCFILE_MINOR_VERSION = (byte) 1;
+
private static final int SYNC_ESCAPE = -1; // "length" of sync entries
private static final int SYNC_HASH_SIZE = 16; // number of bytes in hash
private static final int SYNC_SIZE = 4 + SYNC_HASH_SIZE; // escape + hash
@@ -171,6 +196,7 @@ public class RCFile {
/** The number of bytes between sync points. */
public static final int SYNC_INTERVAL = 100 * SYNC_SIZE;
+ public static final int INT_SIZE = Integer.SIZE/8;
/**
* KeyBuffer is the key of each record in RCFile. Its on-disk layout is as
* below:
@@ -732,6 +758,8 @@ public class RCFile {
if (isCompressed()) {
Text.writeString(out, (codec.getClass()).getName());
}
+ metadata.set(new Text(RCFILE_VERSION_METADATA_STR),
+ new Text(Byte.toString(RCFILE_MINOR_VERSION)));
metadata.write(out);
}
@@ -866,30 +894,7 @@ public class RCFile {
columnValuePlainLength[columnIndex] = 0;
}
- int keyLength = key.getSize();
- if (keyLength < 0) {
- throw new IOException("negative length keys not allowed: " + key);
- }
-
- // Write the record out
- checkAndWriteSync(); // sync
- out.writeInt(keyLength + valueLength); // total record length
- out.writeInt(keyLength); // key portion length
- if (!isCompressed()) {
- out.writeInt(keyLength);
- key.write(out); // key
- } else {
- keyCompressionBuffer.reset();
- keyDeflateFilter.resetState();
- key.write(keyDeflateOut);
- keyDeflateOut.flush();
- keyDeflateFilter.finish();
- int compressedKeyLen = keyCompressionBuffer.getLength();
- out.writeInt(compressedKeyLen);
- out.write(keyCompressionBuffer.getData(), 0, compressedKeyLen);
- }
- value.write(out); // value
-
+ flushBlock(key, value, valueLength);
// clear the columnBuffers
clearColumnBuffers();
@@ -901,11 +906,14 @@ public class RCFile {
* flush a block out without doing anything except compressing the key part.
*/
public void flushBlock(KeyBuffer keyBuffer, ValueBuffer valueBuffer,
- int recordLen, int keyLength, int compressedKeyLen) throws IOException {
+ int valueLength) throws IOException {
checkAndWriteSync(); // sync
- out.writeInt(recordLen); // total record length
- out.writeInt(keyLength); // key portion length
+ int plainKeyContentsLength = keyBuffer.getSize();
+ if (plainKeyContentsLength < 0) {
+ throw new IOException("negative length keys not allowed: " + keyBuffer);
+ }
+ int compressedKeyLen;
if(this.isCompressed()) {
//compress key and write key out
keyCompressionBuffer.reset();
@@ -913,11 +921,18 @@ public class RCFile {
keyBuffer.write(keyDeflateOut);
keyDeflateOut.flush();
keyDeflateFilter.finish();
- compressedKeyLen = keyCompressionBuffer.getLength();
- out.writeInt(compressedKeyLen);
- out.write(keyCompressionBuffer.getData(), 0, compressedKeyLen);
+ compressedKeyLen = INT_SIZE + keyCompressionBuffer.getLength();
+ } else {
+ compressedKeyLen = INT_SIZE + plainKeyContentsLength;
+ }
+ out.writeInt(compressedKeyLen + valueLength); // total record length
+ out.writeInt(compressedKeyLen); // key portion length
+
+ //write key contents
+ out.writeInt(plainKeyContentsLength);
+ if (this.isCompressed()) {
+ out.write(keyCompressionBuffer.getData(), 0, keyCompressionBuffer.getLength());
} else {
- out.writeInt(compressedKeyLen);
keyBuffer.write(out);
}
@@ -930,7 +945,7 @@ public class RCFile {
}
}
- public synchronized void close() throws IOException {
+ public void close() throws IOException {
if (bufferedRecords > 0) {
flushRecords();
}
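
[Editor's note] A worked example of the new length accounting in flushBlock, with made-up numbers: key contents that serialize to 20 plain bytes and compress to 12, followed by a 100-byte value portion.

    // Hypothetical numbers, compressed case.
    int plainKeyContentsLength = 20;               // keyBuffer.getSize()
    int compressedContents = 12;                   // keyCompressionBuffer.getLength()
    int compressedKeyLen = 4 + compressedContents; // INT_SIZE + contents = 16
    int valueLength = 100;
    // Stream, after checkAndWriteSync():
    //   writeInt(compressedKeyLen + valueLength)  -> 116  total record (body) length
    //   writeInt(compressedKeyLen)                -> 16   key portion length
    //   writeInt(plainKeyContentsLength)          -> 20   plain length of key contents
    //   write(compressed key contents)            -> 12 bytes
    //   value.write(out)                          -> 100 bytes
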
@@ -969,6 +984,7 @@ public class RCFile {
private final FSDataInputStream in;
private byte version;
+ private byte rcfileMinorVersion;
private CompressionCodec codec = null;
private Metadata metadata = null;
@@ -1168,9 +1184,14 @@ public class RCFile {
keyDecompressor = CodecPool.getDecompressor(codec);
}
+ rcfileMinorVersion = 0;
metadata = new Metadata();
if (version >= VERSION_WITH_METADATA) { // if version >= 6
metadata.readFields(in);
+ Text rcfv = metadata.get(new Text(RCFILE_VERSION_METADATA_STR));
+ if (rcfv != null) {
+ rcfileMinorVersion = (byte)Integer.parseInt(rcfv.toString());
+ }
}
if (version > 1) { // if version > 1
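
[Editor's note] A sketch putting both sides of the minor-version handshake in one place. Metadata here is Hadoop's SequenceFile.Metadata, which RCFile already embeds; when the entry is absent (any pre-patch file), the reader falls back to minor version 0.

    import org.apache.hadoop.io.SequenceFile.Metadata;
    import org.apache.hadoop.io.Text;

    class MinorVersionSketch {
      static final String KEY = "hive.io.rcfile.minor.version";

      // Writer side: stamp the minor version into the file metadata.
      static void stamp(Metadata metadata) {
        metadata.set(new Text(KEY), new Text(Byte.toString((byte) 1)));
      }

      // Reader side: a missing entry means a 6.0-or-older layout.
      static byte minorVersion(Metadata metadata) {
        Text rcfv = metadata.get(new Text(KEY));
        return rcfv == null ? 0 : (byte) Integer.parseInt(rcfv.toString());
      }
    }
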
@@ -1180,7 +1201,7 @@ public class RCFile {
}
/** Return the current byte position in the input file. */
- public synchronized long getPosition() throws IOException {
+ public long getPosition() throws IOException {
return in.getPos();
}
@@ -1194,12 +1215,12 @@ public class RCFile {
* words, the current seek can only seek to the end of the file. For other
* positions, use {@link RCFile.Reader#sync(long)}.
*/
- public synchronized void seek(long position) throws IOException {
+ public void seek(long position) throws IOException {
in.seek(position);
}
/** Seek to the next sync mark past a given position. */
- public synchronized void sync(long position) throws IOException {
+ public void sync(long position) throws IOException {
if (position + SYNC_SIZE >= end) {
seek(end);
return;
@@ -1262,7 +1283,7 @@ public class RCFile {
* @return the length of the next record or -1 if there is no next record
* @throws IOException
*/
- private synchronized int readRecordLength() throws IOException {
+ private int readRecordLength() throws IOException {
if (in.getPos() >= end) {
return -1;
}
@@ -1295,7 +1316,6 @@ public class RCFile {
}
}
- private int compressedKeyLen = 0;
NonSyncDataInputBuffer keyDataIn = new NonSyncDataInputBuffer();
NonSyncDataInputBuffer keyDecompressBuffer = new NonSyncDataInputBuffer();
NonSyncDataOutputBuffer keyTempBuffer = new NonSyncDataOutputBuffer();
@@ -1311,18 +1331,26 @@ public class RCFile {
return -1;
}
currentKeyLength = in.readInt();
- compressedKeyLen = in.readInt();
+ int plainKeyContentsLength = in.readInt();
+ if ((version < 6) || ((version == 6) && (rcfileMinorVersion < 1))) {
+ int temp = plainKeyContentsLength;
+ plainKeyContentsLength = currentKeyLength;
+ currentKeyLength = INT_SIZE + temp;
+ currentRecordLength = currentRecordLength - plainKeyContentsLength
+ + currentKeyLength;
+ }
if (decompress) {
+ int compressedKeyContentsLength = currentKeyLength - INT_SIZE;
keyTempBuffer.reset();
- keyTempBuffer.write(in, compressedKeyLen);
- keyDecompressBuffer.reset(keyTempBuffer.getData(), compressedKeyLen);
+ keyTempBuffer.write(in, compressedKeyContentsLength);
+ keyDecompressBuffer.reset(keyTempBuffer.getData(), compressedKeyContentsLength);
CompressionInputStream deflatFilter = codec.createInputStream(
keyDecompressBuffer, keyDecompressor);
DataInputStream compressedIn = new DataInputStream(deflatFilter);
deflatFilter.resetState();
keyDecompressedData.reset();
- keyDecompressedData.write(compressedIn, currentKeyLength);
- keyDataIn.reset(keyDecompressedData.getData(), currentKeyLength);
+ keyDecompressedData.write(compressedIn, plainKeyContentsLength);
+ keyDataIn.reset(keyDecompressedData.getData(), plainKeyContentsLength);
currentKey.readFields(keyDataIn);
} else {
currentKey.readFields(in);
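
[Editor's note] The compatibility branch above, restated as a standalone sketch. `first` and `second` stand for the two ints read after the record length; only their interpretation differs between the layouts.

    // Not patch code: normalizes the two post-record-length ints into the
    // 6.1 in-memory fields (key portion length, plain key contents length).
    static int[] normalizeKeyLengths(boolean pre61Layout, int first, int second,
        int recordLength) {
      int keyPortionLength;
      int plainKeyContentsLength;
      if (pre61Layout) {                       // major version < 6, or 6.0 (minor 0)
        plainKeyContentsLength = first;        // old 1st int: plain key length
        keyPortionLength = 4 + second;         // old 2nd int: on-disk key contents length
        // keep recordLength - keyPortionLength equal to the value portion length
        recordLength = recordLength - plainKeyContentsLength + keyPortionLength;
      } else {
        keyPortionLength = first;              // new 1st int: key portion length
        plainKeyContentsLength = second;       // new 2nd int: plain key length
      }
      return new int[] { keyPortionLength, plainKeyContentsLength, recordLength };
    }
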
@@ -1427,7 +1455,7 @@ public class RCFile {
* @return whether there still has records or not
* @throws IOException
*/
- public synchronized boolean nextColumnsBatch() throws IOException {
+ public boolean nextColumnsBatch() throws IOException {
passedRowsNum += (recordsNumInValBuffer - readRowsIndexInBuffer);
return nextKeyBuffer() > 0;
}
@@ -1442,7 +1470,7 @@ public class RCFile {
* @return next row number
* @throws IOException
*/
- public synchronized boolean next(LongWritable readRows) throws IOException {
+ public boolean next(LongWritable readRows) throws IOException {
if (hasRecordsInBuffer()) {
readRows.set(passedRowsNum);
readRowsIndexInBuffer++;
@@ -1475,7 +1503,7 @@ public class RCFile {
*
* @throws IOException
*/
- public synchronized void getCurrentRow(BytesRefArrayWritable ret) throws IOException {
+ public void getCurrentRow(BytesRefArrayWritable ret) throws IOException {
if (!keyInit || rowFetched) {
return;
@@ -1579,19 +1607,9 @@ public class RCFile {
return this.currentValue;
}
- //return the current block's length
- public int getCurrentBlockLength() {
- return this.currentRecordLength;
- }
-
- //return the current block's key length
- public int getCurrentKeyLength() {
- return this.currentKeyLength;
- }
-
- //return the current block's compressed key length
- public int getCurrentCompressedKeyLen() {
- return this.compressedKeyLen;
+ //return the current record's value length
+ public int getCurrentValueLength() {
+ return currentRecordLength - currentKeyLength;
}
//return the CompressionCodec used for this file
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileBlockMergeRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileBlockMergeRecordReader.java
index 20d1f4e..b9300fe 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileBlockMergeRecordReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileBlockMergeRecordReader.java
@@ -89,9 +89,7 @@ public class RCFileBlockMergeRecordReader implements
}
keyWrapper.keyBuffer = this.in.getCurrentKeyBufferObj();
- keyWrapper.recordLength = this.in.getCurrentBlockLength();
- keyWrapper.keyLength = this.in.getCurrentKeyLength();
- keyWrapper.compressedKeyLength = this.in.getCurrentCompressedKeyLen();
+ keyWrapper.valueLength = this.in.getCurrentValueLength();
keyWrapper.codec = this.in.getCompressionCodec();
valueWrapper.valueBuffer = this.in.getCurrentValueBufferObj();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileKeyBufferWrapper.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileKeyBufferWrapper.java
index f7eacdc..dabf991 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileKeyBufferWrapper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileKeyBufferWrapper.java
@@ -30,9 +30,7 @@ public class RCFileKeyBufferWrapper implements
WritableComparable {
protected KeyBuffer keyBuffer;
- protected int recordLength;
- protected int keyLength;
- protected int compressedKeyLength;
+ protected int valueLength;
protected CompressionCodec codec;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileMergeMapper.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileMergeMapper.java
index bb1e3c9..1c5e8cc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileMergeMapper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileMergeMapper.java
@@ -108,8 +108,7 @@ public class RCFileMergeMapper extends MapReduceBase implements
"RCFileMerge failed because the input files use different CompressionCodec or have different column number setting.");
}
- outWriter.flushBlock(key.keyBuffer, value.valueBuffer, key.recordLength,
- key.keyLength, key.compressedKeyLength);
+ outWriter.flushBlock(key.keyBuffer, value.valueBuffer, key.valueLength);
} catch (Throwable e) {
this.exception = true;
close();
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java
index 8bb6f3a..2bc1750 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java
@@ -134,7 +134,12 @@ public class TestRCFile extends TestCase {
} catch (UnsupportedEncodingException e) {
}
}
-
+
+ @Override
+ protected void setUp() throws Exception {
+ ColumnProjectionUtils.setFullyReadColumns(conf);
+ }
+
public void testSimpleReadAndWrite() throws IOException, SerDeException {
fs.delete(file, true);
@@ -386,6 +391,72 @@ public class TestRCFile extends TestCase {
long cost = System.currentTimeMillis() - start;
LOG.debug("reading fully costs:" + cost + " milliseconds");
}
+
+ public void testReadv6dot0Uncompressed() throws IOException, SerDeException {
+
+ Text[][] records = {
+ {new Text("a"), new Text("1")},
+ {new Text("bb"), new Text("22")},
+ {new Text("a"), new Text("22")},
+ {new Text("bb"), new Text("1")}
+ };
+ int numRows = records.length;
+ int numCols = records[0].length;
+
+ Path dir = new Path(System.getProperty("test.src.data.dir", "data"), "files");
+
+ Path rcfile = new Path(dir, "test_v6dot0_uncompressed.rc");
+ RCFile.Reader rcreader = new RCFile.Reader(fs, rcfile, conf);
+ for (int i = 0; i < numRows; i++) {
+ LongWritable rowID = new LongWritable();
+ rcreader.next(rowID);
+ BytesRefArrayWritable cols = new BytesRefArrayWritable();
+ rcreader.getCurrentRow(cols);
+ cols.resetValid(numCols);
+ for (int j = 0; j < numCols; j++) {
+ byte[] expected = records[i][j].getBytes();
+ byte[] actual = cols.get(j).getBytesCopy();
+ assertEquals(expected.length, actual.length);
+ for (int k = 0; k < expected.length; ++k) {
+ assertEquals(expected[k], actual[k]);
+ }
+ }
+ }
+ rcreader.close();
+ }
+
+ public void testReadv6dot0Compressed() throws IOException, SerDeException {
+
+ Text[][] records = {
+ {new Text("a"), new Text("1")},
+ {new Text("bb"), new Text("22")},
+ {new Text("a"), new Text("22")},
+ {new Text("bb"), new Text("1")}
+ };
+ int numRows = records.length;
+ int numCols = records[0].length;
+
+ Path dir = new Path(System.getProperty("test.src.data.dir", "data"), "files");
+
+ Path rcfile = new Path(dir, "test_v6dot0_compressed.rc");
+ RCFile.Reader rcreader = new RCFile.Reader(fs, rcfile, conf);
+ for (int i = 0; i < numRows; i++) {
+ LongWritable rowID = new LongWritable();
+ rcreader.next(rowID);
+ BytesRefArrayWritable cols = new BytesRefArrayWritable();
+ rcreader.getCurrentRow(cols);
+ cols.resetValid(numCols);
+ for (int j = 0; j < numCols; j++) {
+ byte[] expected = records[i][j].getBytes();
+ byte[] actual = cols.get(j).getBytesCopy();
+ assertEquals(expected.length, actual.length);
+ for (int k = 0; k < expected.length; ++k) {
+ assertEquals(expected[k], actual[k]);
+ }
+ }
+ }
+ rcreader.close();
+ }
public void testSynAndSplit() throws IOException {
splitBeforeSync();
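
[Editor's note] The new tests read checked-in 6.0 fixtures (the binary files added at the top of this patch). For reference, a sketch of producing an equivalent four-row, two-column file with the Writer API used elsewhere in TestRCFile; note that a file written after this patch carries minor version 1, so it would exercise the new layout rather than the 6.0 compatibility path. `fs`, `conf`, and `dir` are assumed to be the test fixture's fields.

    // Sketch only: writes four rows of two Text columns, uncompressed.
    RCFileOutputFormat.setColumnNumber(conf, 2);  // column count travels via the conf
    RCFile.Writer writer = new RCFile.Writer(fs, conf, new Path(dir, "sample.rc"));
    Text[][] records = {
        {new Text("a"), new Text("1")}, {new Text("bb"), new Text("22")},
        {new Text("a"), new Text("22")}, {new Text("bb"), new Text("1")}};
    for (Text[] row : records) {
      BytesRefArrayWritable cols = new BytesRefArrayWritable(row.length);
      for (int j = 0; j < row.length; j++) {
        cols.set(j, new BytesRefWritable(row[j].getBytes(), 0, row[j].getLength()));
      }
      writer.append(cols);
    }
    writer.close();
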
diff --git a/ql/src/test/results/clientpositive/alter_merge.q.out b/ql/src/test/results/clientpositive/alter_merge.q.out
index 25f36c0..a4c1781 100644
--- a/ql/src/test/results/clientpositive/alter_merge.q.out
+++ b/ql/src/test/results/clientpositive/alter_merge.q.out
@@ -38,25 +38,25 @@ totalFileSize:636
maxFileSize:222
minFileSize:206
lastAccessTime:0
-lastUpdateTime:1300680902000
+lastUpdateTime:1301307548000
PREHOOK: query: select count(1) from src_rc_merge_test
PREHOOK: type: QUERY
PREHOOK: Input: default@src_rc_merge_test
-PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-20_21-15-02_678_8520633473198390721/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-19-09_231_2198985670130000507/-mr-10000
POSTHOOK: query: select count(1) from src_rc_merge_test
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src_rc_merge_test
-POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-20_21-15-02_678_8520633473198390721/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-19-09_231_2198985670130000507/-mr-10000
15
PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test
PREHOOK: type: QUERY
PREHOOK: Input: default@src_rc_merge_test
-PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-20_21-15-10_671_3103325352622055634/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-19-29_311_966801859426201691/-mr-10000
POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src_rc_merge_test
-POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-20_21-15-10_671_3103325352622055634/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-19-29_311_966801859426201691/-mr-10000
214 -7678496319
PREHOOK: query: alter table src_rc_merge_test concatenate
PREHOOK: type: ALTER_TABLE_MERGE
@@ -79,29 +79,29 @@ columns:struct columns { i32 key, string value}
partitioned:false
partitionColumns:
totalNumberFiles:1
-totalFileSize:334
-maxFileSize:334
-minFileSize:334
+totalFileSize:365
+maxFileSize:365
+minFileSize:365
lastAccessTime:0
-lastUpdateTime:1300680922000
+lastUpdateTime:1301307580000
PREHOOK: query: select count(1) from src_rc_merge_test
PREHOOK: type: QUERY
PREHOOK: Input: default@src_rc_merge_test
-PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-20_21-15-23_887_8242125214309556968/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-19-42_625_6930629717019904929/-mr-10000
POSTHOOK: query: select count(1) from src_rc_merge_test
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src_rc_merge_test
-POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-20_21-15-23_887_8242125214309556968/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-19-42_625_6930629717019904929/-mr-10000
15
PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test
PREHOOK: type: QUERY
PREHOOK: Input: default@src_rc_merge_test
-PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-20_21-15-31_244_4854539946976169821/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-19-58_205_5938290326044947890/-mr-10000
POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src_rc_merge_test
-POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-20_21-15-31_244_4854539946976169821/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-19-58_205_5938290326044947890/-mr-10000
214 -7678496319
PREHOOK: query: create table src_rc_merge_test_part(key int, value string) partitioned by (ds string) stored as rcfile
PREHOOK: type: CREATETABLE
@@ -150,25 +150,25 @@ totalFileSize:636
maxFileSize:222
minFileSize:206
lastAccessTime:0
-lastUpdateTime:1300680946000
+lastUpdateTime:1301307608000
PREHOOK: query: select count(1) from src_rc_merge_test_part
PREHOOK: type: QUERY
PREHOOK: Input: default@src_rc_merge_test_part@ds=2011
-PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-20_21-15-47_140_6546800621174301781/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-20-09_216_4859589248335187490/-mr-10000
POSTHOOK: query: select count(1) from src_rc_merge_test_part
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src_rc_merge_test_part@ds=2011
-POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-20_21-15-47_140_6546800621174301781/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-20-09_216_4859589248335187490/-mr-10000
15
PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part
PREHOOK: type: QUERY
PREHOOK: Input: default@src_rc_merge_test_part@ds=2011
-PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-20_21-15-58_930_7643638670315828175/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-20-17_812_7453237252447967192/-mr-10000
POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src_rc_merge_test_part@ds=2011
-POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-20_21-15-58_930_7643638670315828175/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-20-17_812_7453237252447967192/-mr-10000
214 -7678496319
PREHOOK: query: alter table src_rc_merge_test_part partition (ds='2011') concatenate
PREHOOK: type: ALTER_PARTITION_MERGE
@@ -191,29 +191,29 @@ columns:struct columns { i32 key, string value}
partitioned:true
partitionColumns:struct partition_columns { string ds}
totalNumberFiles:1
-totalFileSize:334
-maxFileSize:334
-minFileSize:334
+totalFileSize:365
+maxFileSize:365
+minFileSize:365
lastAccessTime:0
-lastUpdateTime:1300680970000
+lastUpdateTime:1301307633000
PREHOOK: query: select count(1) from src_rc_merge_test_part
PREHOOK: type: QUERY
PREHOOK: Input: default@src_rc_merge_test_part@ds=2011
-PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-20_21-16-11_200_6628817362773769157/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-20-34_091_154347906019109002/-mr-10000
POSTHOOK: query: select count(1) from src_rc_merge_test_part
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src_rc_merge_test_part@ds=2011
-POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-20_21-16-11_200_6628817362773769157/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-20-34_091_154347906019109002/-mr-10000
15
PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part
PREHOOK: type: QUERY
PREHOOK: Input: default@src_rc_merge_test_part@ds=2011
-PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-20_21-16-17_532_734781450927092021/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-20-41_781_3510087503600279899/-mr-10000
POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src_rc_merge_test_part@ds=2011
-POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-20_21-16-17_532_734781450927092021/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-20-41_781_3510087503600279899/-mr-10000
214 -7678496319
PREHOOK: query: drop table src_rc_merge_test
PREHOOK: type: DROPTABLE
diff --git a/ql/src/test/results/clientpositive/alter_merge_stats.q.out b/ql/src/test/results/clientpositive/alter_merge_stats.q.out
index 243f7cc..7cb7021 100644
--- a/ql/src/test/results/clientpositive/alter_merge_stats.q.out
+++ b/ql/src/test/results/clientpositive/alter_merge_stats.q.out
@@ -38,7 +38,7 @@ totalFileSize:636
maxFileSize:222
minFileSize:206
lastAccessTime:0
-lastUpdateTime:1300680991000
+lastUpdateTime:1301307858000
PREHOOK: query: desc extended src_rc_merge_test_stat
PREHOOK: type: DESCTABLE
@@ -47,7 +47,7 @@ POSTHOOK: type: DESCTABLE
key int from deserializer
value string from deserializer
-Detailed Table Information Table(tableName:src_rc_merge_test_stat, dbName:default, owner:krishnak, createTime:1300680988, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/src_rc_merge_test_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1300680991}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:src_rc_merge_test_stat, dbName:default, owner:krishnak, createTime:1301307856, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/src_rc_merge_test_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1301307858}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
PREHOOK: query: analyze table src_rc_merge_test_stat compute statistics
PREHOOK: type: QUERY
PREHOOK: Input: default@src_rc_merge_test_stat
@@ -63,7 +63,7 @@ POSTHOOK: type: DESCTABLE
key int from deserializer
value string from deserializer
-Detailed Table Information Table(tableName:src_rc_merge_test_stat, dbName:default, owner:krishnak, createTime:1300680988, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/src_rc_merge_test_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=3, transient_lastDdlTime=1300680999, numRows=6, totalSize=636}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:src_rc_merge_test_stat, dbName:default, owner:krishnak, createTime:1301307856, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/src_rc_merge_test_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=3, transient_lastDdlTime=1301307870, numRows=6, totalSize=636}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
PREHOOK: query: alter table src_rc_merge_test_stat concatenate
PREHOOK: type: ALTER_TABLE_MERGE
PREHOOK: Input: default@src_rc_merge_test_stat
@@ -85,11 +85,11 @@ columns:struct columns { i32 key, string value}
partitioned:false
partitionColumns:
totalNumberFiles:1
-totalFileSize:334
-maxFileSize:334
-minFileSize:334
+totalFileSize:365
+maxFileSize:365
+minFileSize:365
lastAccessTime:0
-lastUpdateTime:1300681001000
+lastUpdateTime:1301307873000
PREHOOK: query: desc extended src_rc_merge_test_stat
PREHOOK: type: DESCTABLE
@@ -98,7 +98,7 @@ POSTHOOK: type: DESCTABLE
key int from deserializer
value string from deserializer
-Detailed Table Information Table(tableName:src_rc_merge_test_stat, dbName:default, owner:krishnak, createTime:1300680988, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/src_rc_merge_test_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1300681002, numRows=6, totalSize=334}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:src_rc_merge_test_stat, dbName:default, owner:krishnak, createTime:1301307856, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/src_rc_merge_test_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1301307874, numRows=6, totalSize=365}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
PREHOOK: query: create table src_rc_merge_test_part_stat(key int, value string) partitioned by (ds string) stored as rcfile
PREHOOK: type: CREATETABLE
POSTHOOK: query: create table src_rc_merge_test_part_stat(key int, value string) partitioned by (ds string) stored as rcfile
@@ -146,7 +146,7 @@ totalFileSize:636
maxFileSize:222
minFileSize:206
lastAccessTime:0
-lastUpdateTime:1300681004000
+lastUpdateTime:1301307877000
PREHOOK: query: desc extended src_rc_merge_test_part_stat
PREHOOK: type: DESCTABLE
@@ -156,7 +156,7 @@ key int from deserializer
value string from deserializer
ds string
-Detailed Table Information Table(tableName:src_rc_merge_test_part_stat, dbName:default, owner:krishnak, createTime:1300681002, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/src_rc_merge_test_part_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{transient_lastDdlTime=1300681002}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:src_rc_merge_test_part_stat, dbName:default, owner:krishnak, createTime:1301307875, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/src_rc_merge_test_part_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{transient_lastDdlTime=1301307875}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
PREHOOK: query: analyze table src_rc_merge_test_part_stat partition(ds='2011') compute statistics
PREHOOK: type: QUERY
PREHOOK: Input: default@src_rc_merge_test_part_stat@ds=2011
@@ -175,7 +175,7 @@ key int from deserializer
value string from deserializer
ds string
-Detailed Table Information Table(tableName:src_rc_merge_test_part_stat, dbName:default, owner:krishnak, createTime:1300681002, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/src_rc_merge_test_part_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=1, numFiles=3, transient_lastDdlTime=1300681013, numRows=6, totalSize=636}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:src_rc_merge_test_part_stat, dbName:default, owner:krishnak, createTime:1301307875, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/src_rc_merge_test_part_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=1, numFiles=3, transient_lastDdlTime=1301307885, numRows=6, totalSize=636}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
PREHOOK: query: alter table src_rc_merge_test_part_stat partition (ds='2011') concatenate
PREHOOK: type: ALTER_PARTITION_MERGE
PREHOOK: Input: default@src_rc_merge_test_part_stat
@@ -197,11 +197,11 @@ columns:struct columns { i32 key, string value}
partitioned:true
partitionColumns:struct partition_columns { string ds}
totalNumberFiles:1
-totalFileSize:334
-maxFileSize:334
-minFileSize:334
+totalFileSize:365
+maxFileSize:365
+minFileSize:365
lastAccessTime:0
-lastUpdateTime:1300681015000
+lastUpdateTime:1301307888000
PREHOOK: query: desc extended src_rc_merge_test_part_stat
PREHOOK: type: DESCTABLE
@@ -211,7 +211,7 @@ key int from deserializer
value string from deserializer
ds string
-Detailed Table Information Table(tableName:src_rc_merge_test_part_stat, dbName:default, owner:krishnak, createTime:1300681002, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/src_rc_merge_test_part_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, transient_lastDdlTime=1300681015, numRows=6, totalSize=334}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:src_rc_merge_test_part_stat, dbName:default, owner:krishnak, createTime:1301307875, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/src_rc_merge_test_part_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, transient_lastDdlTime=1301307888, numRows=6, totalSize=365}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
PREHOOK: query: drop table src_rc_merge_test_stat
PREHOOK: type: DROPTABLE
PREHOOK: Input: default@src_rc_merge_test_stat
diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat.q.out
index cee2e72..d4050ce 100644
--- a/ql/src/test/results/clientpositive/partition_wise_fileformat.q.out
+++ b/ql/src/test/results/clientpositive/partition_wise_fileformat.q.out
@@ -20,8 +20,8 @@ POSTHOOK: type: SHOW_TABLESTATUS
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
tableName:partition_test_partitioned
-owner:njain
-location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/partition_test_partitioned
+owner:krishnak
+location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/partition_test_partitioned
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
columns:struct columns { string key, string value}
@@ -32,7 +32,7 @@ totalFileSize:216
maxFileSize:216
minFileSize:216
lastAccessTime:0
-lastUpdateTime:1282030124000
+lastUpdateTime:1301308029000
PREHOOK: query: show table extended like partition_test_partitioned partition(dt=100)
PREHOOK: type: SHOW_TABLESTATUS
@@ -41,8 +41,8 @@ POSTHOOK: type: SHOW_TABLESTATUS
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
tableName:partition_test_partitioned
-owner:njain
-location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/partition_test_partitioned/dt=100
+owner:krishnak
+location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/partition_test_partitioned/dt=100
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
columns:struct columns { string key, string value}
@@ -53,16 +53,16 @@ totalFileSize:216
maxFileSize:216
minFileSize:216
lastAccessTime:0
-lastUpdateTime:1282030124000
+lastUpdateTime:1301308029000
PREHOOK: query: select key from partition_test_partitioned where dt=100
PREHOOK: type: QUERY
PREHOOK: Input: default@partition_test_partitioned@dt=100
-PREHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-28-45_751_169876228483688017/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-27-10_534_3517294960259884011/-mr-10000
POSTHOOK: query: select key from partition_test_partitioned where dt=100
POSTHOOK: type: QUERY
POSTHOOK: Input: default@partition_test_partitioned@dt=100
-POSTHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-28-45_751_169876228483688017/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-27-10_534_3517294960259884011/-mr-10000
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
238
@@ -93,11 +93,11 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(s
PREHOOK: query: select key from partition_test_partitioned
PREHOOK: type: QUERY
PREHOOK: Input: default@partition_test_partitioned@dt=100
-PREHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-28-49_132_7250201183436014876/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-27-17_139_7616467668164915031/-mr-10000
POSTHOOK: query: select key from partition_test_partitioned
POSTHOOK: type: QUERY
POSTHOOK: Input: default@partition_test_partitioned@dt=100
-POSTHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-28-49_132_7250201183436014876/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-27-17_139_7616467668164915031/-mr-10000
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
238
@@ -156,19 +156,19 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(s
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
tableName:partition_test_partitioned
-owner:njain
-location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/partition_test_partitioned
+owner:krishnak
+location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/partition_test_partitioned
inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
columns:struct columns { string key, string value}
partitioned:true
partitionColumns:struct partition_columns { string dt}
totalNumberFiles:2
-totalFileSize:586
-maxFileSize:370
+totalFileSize:617
+maxFileSize:401
minFileSize:216
lastAccessTime:0
-lastUpdateTime:1282030136000
+lastUpdateTime:1301308055000
PREHOOK: query: show table extended like partition_test_partitioned partition(dt=100)
PREHOOK: type: SHOW_TABLESTATUS
@@ -179,8 +179,8 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(s
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
tableName:partition_test_partitioned
-owner:njain
-location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/partition_test_partitioned/dt=100
+owner:krishnak
+location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/partition_test_partitioned/dt=100
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
columns:struct columns { string key, string value}
@@ -191,7 +191,7 @@ totalFileSize:216
maxFileSize:216
minFileSize:216
lastAccessTime:0
-lastUpdateTime:1282030136000
+lastUpdateTime:1301308055000
PREHOOK: query: show table extended like partition_test_partitioned partition(dt=101)
PREHOOK: type: SHOW_TABLESTATUS
@@ -202,28 +202,28 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(s
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
tableName:partition_test_partitioned
-owner:njain
-location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/partition_test_partitioned/dt=101
+owner:krishnak
+location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/partition_test_partitioned/dt=101
inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
columns:struct columns { string key, string value}
partitioned:true
partitionColumns:struct partition_columns { string dt}
totalNumberFiles:1
-totalFileSize:370
-maxFileSize:370
-minFileSize:370
+totalFileSize:401
+maxFileSize:401
+minFileSize:401
lastAccessTime:0
-lastUpdateTime:1282030136000
+lastUpdateTime:1301308055000
PREHOOK: query: select key from partition_test_partitioned where dt=100
PREHOOK: type: QUERY
PREHOOK: Input: default@partition_test_partitioned@dt=100
-PREHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-28-57_738_4221917279395084924/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-27-37_639_5228682805283771837/-mr-10000
POSTHOOK: query: select key from partition_test_partitioned where dt=100
POSTHOOK: type: QUERY
POSTHOOK: Input: default@partition_test_partitioned@dt=100
-POSTHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-28-57_738_4221917279395084924/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-27-37_639_5228682805283771837/-mr-10000
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
@@ -256,11 +256,11 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(s
PREHOOK: query: select key from partition_test_partitioned where dt=101
PREHOOK: type: QUERY
PREHOOK: Input: default@partition_test_partitioned@dt=101
-PREHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-29-01_104_7633460107807770575/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-27-44_884_7494292850124737954/-mr-10000
POSTHOOK: query: select key from partition_test_partitioned where dt=101
POSTHOOK: type: QUERY
POSTHOOK: Input: default@partition_test_partitioned@dt=101
-POSTHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-29-01_104_7633460107807770575/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-27-44_884_7494292850124737954/-mr-10000
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
@@ -294,12 +294,12 @@ PREHOOK: query: select key from partition_test_partitioned
PREHOOK: type: QUERY
PREHOOK: Input: default@partition_test_partitioned@dt=100
PREHOOK: Input: default@partition_test_partitioned@dt=101
-PREHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-29-04_462_387937014678398458/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-27-52_961_222322694600111869/-mr-10000
POSTHOOK: query: select key from partition_test_partitioned
POSTHOOK: type: QUERY
POSTHOOK: Input: default@partition_test_partitioned@dt=100
POSTHOOK: Input: default@partition_test_partitioned@dt=101
-POSTHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-29-04_462_387937014678398458/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-27-52_961_222322694600111869/-mr-10000
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
@@ -391,19 +391,19 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(s
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
tableName:partition_test_partitioned
-owner:njain
-location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/partition_test_partitioned
+owner:krishnak
+location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/partition_test_partitioned
inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
columns:struct columns { string key, string value}
partitioned:true
partitionColumns:struct partition_columns { string dt}
totalNumberFiles:3
-totalFileSize:1474
+totalFileSize:1505
maxFileSize:888
minFileSize:216
lastAccessTime:0
-lastUpdateTime:1282030151000
+lastUpdateTime:1301308092000
PREHOOK: query: show table extended like partition_test_partitioned partition(dt=100)
PREHOOK: type: SHOW_TABLESTATUS
@@ -416,8 +416,8 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(s
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
tableName:partition_test_partitioned
-owner:njain
-location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/partition_test_partitioned/dt=100
+owner:krishnak
+location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/partition_test_partitioned/dt=100
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
columns:struct columns { string key, string value}
@@ -428,7 +428,7 @@ totalFileSize:216
maxFileSize:216
minFileSize:216
lastAccessTime:0
-lastUpdateTime:1282030151000
+lastUpdateTime:1301308092000
PREHOOK: query: show table extended like partition_test_partitioned partition(dt=101)
PREHOOK: type: SHOW_TABLESTATUS
@@ -441,19 +441,19 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(s
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
tableName:partition_test_partitioned
-owner:njain
-location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/partition_test_partitioned/dt=101
+owner:krishnak
+location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/partition_test_partitioned/dt=101
inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
columns:struct columns { string key, string value}
partitioned:true
partitionColumns:struct partition_columns { string dt}
totalNumberFiles:1
-totalFileSize:370
-maxFileSize:370
-minFileSize:370
+totalFileSize:401
+maxFileSize:401
+minFileSize:401
lastAccessTime:0
-lastUpdateTime:1282030151000
+lastUpdateTime:1301308092000
PREHOOK: query: show table extended like partition_test_partitioned partition(dt=102)
PREHOOK: type: SHOW_TABLESTATUS
@@ -466,8 +466,8 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(s
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
tableName:partition_test_partitioned
-owner:njain
-location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/partition_test_partitioned/dt=102
+owner:krishnak
+location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/partition_test_partitioned/dt=102
inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
columns:struct columns { string key, string value}
@@ -478,16 +478,16 @@ totalFileSize:888
maxFileSize:888
minFileSize:888
lastAccessTime:0
-lastUpdateTime:1282030151000
+lastUpdateTime:1301308092000
PREHOOK: query: select key from partition_test_partitioned where dt=100
PREHOOK: type: QUERY
PREHOOK: Input: default@partition_test_partitioned@dt=100
-PREHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-29-13_648_4852070201967333188/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-28-15_448_4730227192199108726/-mr-10000
POSTHOOK: query: select key from partition_test_partitioned where dt=100
POSTHOOK: type: QUERY
POSTHOOK: Input: default@partition_test_partitioned@dt=100
-POSTHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-29-13_648_4852070201967333188/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-28-15_448_4730227192199108726/-mr-10000
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
@@ -522,11 +522,11 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).value SIMPLE [(s
PREHOOK: query: select key from partition_test_partitioned where dt=101
PREHOOK: type: QUERY
PREHOOK: Input: default@partition_test_partitioned@dt=101
-PREHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-29-16_874_2144423925129213549/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-28-23_374_3223676181616394812/-mr-10000
POSTHOOK: query: select key from partition_test_partitioned where dt=101
POSTHOOK: type: QUERY
POSTHOOK: Input: default@partition_test_partitioned@dt=101
-POSTHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-29-16_874_2144423925129213549/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-28-23_374_3223676181616394812/-mr-10000
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
@@ -561,11 +561,11 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).value SIMPLE [(s
PREHOOK: query: select key from partition_test_partitioned where dt=102
PREHOOK: type: QUERY
PREHOOK: Input: default@partition_test_partitioned@dt=102
-PREHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-29-20_202_7935828228211072259/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-28-31_651_7624461423093868710/-mr-10000
POSTHOOK: query: select key from partition_test_partitioned where dt=102
POSTHOOK: type: QUERY
POSTHOOK: Input: default@partition_test_partitioned@dt=102
-POSTHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-29-20_202_7935828228211072259/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-28-31_651_7624461423093868710/-mr-10000
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
@@ -602,13 +602,13 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@partition_test_partitioned@dt=100
PREHOOK: Input: default@partition_test_partitioned@dt=101
PREHOOK: Input: default@partition_test_partitioned@dt=102
-PREHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-29-23_498_1472831792275217567/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-28-40_607_8576436476371286922/-mr-10000
POSTHOOK: query: select key from partition_test_partitioned
POSTHOOK: type: QUERY
POSTHOOK: Input: default@partition_test_partitioned@dt=100
POSTHOOK: Input: default@partition_test_partitioned@dt=101
POSTHOOK: Input: default@partition_test_partitioned@dt=102
-POSTHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-29-23_498_1472831792275217567/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-28-40_607_8576436476371286922/-mr-10000
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
@@ -695,13 +695,13 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@partition_test_partitioned@dt=100
PREHOOK: Input: default@partition_test_partitioned@dt=101
PREHOOK: Input: default@partition_test_partitioned@dt=102
-PREHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-29-26_942_5352511744427811614/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-28-53_002_7001863270329093164/-mr-10000
POSTHOOK: query: select key from partition_test_partitioned where dt >=100 and dt <= 102
POSTHOOK: type: QUERY
POSTHOOK: Input: default@partition_test_partitioned@dt=100
POSTHOOK: Input: default@partition_test_partitioned@dt=101
POSTHOOK: Input: default@partition_test_partitioned@dt=102
-POSTHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-29-26_942_5352511744427811614/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-28-53_002_7001863270329093164/-mr-10000
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat3.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat3.q.out
index 067ab43..e6d25f2 100644
--- a/ql/src/test/results/clientpositive/partition_wise_fileformat3.q.out
+++ b/ql/src/test/results/clientpositive/partition_wise_fileformat3.q.out
@@ -28,19 +28,19 @@ POSTHOOK: type: SHOW_TABLESTATUS
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
tableName:partition_test_partitioned
-owner:njain
-location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/partition_test_partitioned/dt=101
+owner:krishnak
+location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/partition_test_partitioned/dt=101
inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
columns:struct columns { string key, string value}
partitioned:true
partitionColumns:struct partition_columns { string dt}
totalNumberFiles:1
-totalFileSize:370
-maxFileSize:370
-minFileSize:370
+totalFileSize:401
+maxFileSize:401
+minFileSize:401
lastAccessTime:0
-lastUpdateTime:1282030189000
+lastUpdateTime:1301308334000
PREHOOK: query: alter table partition_test_partitioned set fileformat Sequencefile
PREHOOK: type: ALTERTABLE_FILEFORMAT
@@ -73,8 +73,8 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(s
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
tableName:partition_test_partitioned
-owner:njain
-location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/partition_test_partitioned/dt=102
+owner:krishnak
+location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/partition_test_partitioned/dt=102
inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
columns:struct columns { string key, string value}
@@ -85,16 +85,16 @@ totalFileSize:888
maxFileSize:888
minFileSize:888
lastAccessTime:0
-lastUpdateTime:1282030193000
+lastUpdateTime:1301308344000
PREHOOK: query: select key from partition_test_partitioned where dt=102
PREHOOK: type: QUERY
PREHOOK: Input: default@partition_test_partitioned@dt=102
-PREHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-29-54_035_3465387967141592899/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-32-24_969_2312628843276952894/-mr-10000
POSTHOOK: query: select key from partition_test_partitioned where dt=102
POSTHOOK: type: QUERY
POSTHOOK: Input: default@partition_test_partitioned@dt=102
-POSTHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-29-54_035_3465387967141592899/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-32-24_969_2312628843276952894/-mr-10000
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
@@ -149,8 +149,8 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(s
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
tableName:partition_test_partitioned
-owner:njain
-location:pfile:/data/users/njain/hive_commit2/hive_commit2/build/ql/test/data/warehouse/partition_test_partitioned/dt=101
+owner:krishnak
+location:pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/partition_test_partitioned/dt=101
inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
columns:struct columns { string key, string value}
@@ -161,16 +161,16 @@ totalFileSize:888
maxFileSize:888
minFileSize:888
lastAccessTime:0
-lastUpdateTime:1282030201000
+lastUpdateTime:1301308361000
PREHOOK: query: select key from partition_test_partitioned where dt=101
PREHOOK: type: QUERY
PREHOOK: Input: default@partition_test_partitioned@dt=101
-PREHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-30-01_709_7422795570962759074/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-32-42_031_6718146757498865551/-mr-10000
POSTHOOK: query: select key from partition_test_partitioned where dt=101
POSTHOOK: type: QUERY
POSTHOOK: Input: default@partition_test_partitioned@dt=101
-POSTHOOK: Output: file:/tmp/njain/hive_2010-08-17_00-30-01_709_7422795570962759074/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-32-42_031_6718146757498865551/-mr-10000
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
diff --git a/ql/src/test/results/clientpositive/sample10.q.out b/ql/src/test/results/clientpositive/sample10.q.out
index 50406c3..ac2c3a8 100644
--- a/ql/src/test/results/clientpositive/sample10.q.out
+++ b/ql/src/test/results/clientpositive/sample10.q.out
@@ -104,12 +104,12 @@ STAGE PLANS:
type: bigint
Needs Tagging: false
Path -> Alias:
- pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-08/hr=11/000000_0 [srcpartbucket]
- pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-08/hr=12/000000_0 [srcpartbucket]
- pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-09/hr=11/000000_0 [srcpartbucket]
- pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-09/hr=12/000000_0 [srcpartbucket]
+ pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-08/hr=11/000000_0 [srcpartbucket]
+ pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-08/hr=12/000000_0 [srcpartbucket]
+ pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-09/hr=11/000000_0 [srcpartbucket]
+ pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-09/hr=12/000000_0 [srcpartbucket]
Path -> Partition:
- pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-08/hr=11/000000_0
+ pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-08/hr=11/000000_0
Partition
base file name: 000000_0
input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
@@ -124,7 +124,7 @@ STAGE PLANS:
columns.types string:string
file.inputformat org.apache.hadoop.hive.ql.io.RCFileInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.RCFileOutputFormat
- location pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-08/hr=11
+ location pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-08/hr=11
name default.srcpartbucket
numFiles 16
numPartitions 4
@@ -133,8 +133,8 @@ STAGE PLANS:
serialization.ddl struct srcpartbucket { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
- totalSize 2748
- transient_lastDdlTime 1297386150
+ totalSize 3244
+ transient_lastDdlTime 1301308525
serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
@@ -146,7 +146,7 @@ STAGE PLANS:
columns.types string:string
file.inputformat org.apache.hadoop.hive.ql.io.RCFileInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.RCFileOutputFormat
- location pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/srcpartbucket
+ location pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/srcpartbucket
name default.srcpartbucket
numFiles 16
numPartitions 4
@@ -155,12 +155,12 @@ STAGE PLANS:
serialization.ddl struct srcpartbucket { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
- totalSize 2748
- transient_lastDdlTime 1297386150
+ totalSize 3244
+ transient_lastDdlTime 1301308525
serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
name: default.srcpartbucket
name: default.srcpartbucket
- pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-08/hr=12/000000_0
+ pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-08/hr=12/000000_0
Partition
base file name: 000000_0
input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
@@ -175,7 +175,7 @@ STAGE PLANS:
columns.types string:string
file.inputformat org.apache.hadoop.hive.ql.io.RCFileInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.RCFileOutputFormat
- location pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-08/hr=12
+ location pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-08/hr=12
name default.srcpartbucket
numFiles 16
numPartitions 4
@@ -184,8 +184,8 @@ STAGE PLANS:
serialization.ddl struct srcpartbucket { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
- totalSize 2748
- transient_lastDdlTime 1297386150
+ totalSize 3244
+ transient_lastDdlTime 1301308525
serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
@@ -197,7 +197,7 @@ STAGE PLANS:
columns.types string:string
file.inputformat org.apache.hadoop.hive.ql.io.RCFileInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.RCFileOutputFormat
- location pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/srcpartbucket
+ location pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/srcpartbucket
name default.srcpartbucket
numFiles 16
numPartitions 4
@@ -206,12 +206,12 @@ STAGE PLANS:
serialization.ddl struct srcpartbucket { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
- totalSize 2748
- transient_lastDdlTime 1297386150
+ totalSize 3244
+ transient_lastDdlTime 1301308525
serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
name: default.srcpartbucket
name: default.srcpartbucket
- pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-09/hr=11/000000_0
+ pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-09/hr=11/000000_0
Partition
base file name: 000000_0
input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
@@ -226,7 +226,7 @@ STAGE PLANS:
columns.types string:string
file.inputformat org.apache.hadoop.hive.ql.io.RCFileInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.RCFileOutputFormat
- location pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-09/hr=11
+ location pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-09/hr=11
name default.srcpartbucket
numFiles 16
numPartitions 4
@@ -235,8 +235,8 @@ STAGE PLANS:
serialization.ddl struct srcpartbucket { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
- totalSize 2748
- transient_lastDdlTime 1297386150
+ totalSize 3244
+ transient_lastDdlTime 1301308525
serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
@@ -248,7 +248,7 @@ STAGE PLANS:
columns.types string:string
file.inputformat org.apache.hadoop.hive.ql.io.RCFileInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.RCFileOutputFormat
- location pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/srcpartbucket
+ location pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/srcpartbucket
name default.srcpartbucket
numFiles 16
numPartitions 4
@@ -257,12 +257,12 @@ STAGE PLANS:
serialization.ddl struct srcpartbucket { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
- totalSize 2748
- transient_lastDdlTime 1297386150
+ totalSize 3244
+ transient_lastDdlTime 1301308525
serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
name: default.srcpartbucket
name: default.srcpartbucket
- pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-09/hr=12/000000_0
+ pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-09/hr=12/000000_0
Partition
base file name: 000000_0
input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
@@ -277,7 +277,7 @@ STAGE PLANS:
columns.types string:string
file.inputformat org.apache.hadoop.hive.ql.io.RCFileInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.RCFileOutputFormat
- location pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-09/hr=12
+ location pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/srcpartbucket/ds=2008-04-09/hr=12
name default.srcpartbucket
numFiles 16
numPartitions 4
@@ -286,8 +286,8 @@ STAGE PLANS:
serialization.ddl struct srcpartbucket { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
- totalSize 2748
- transient_lastDdlTime 1297386150
+ totalSize 3244
+ transient_lastDdlTime 1301308525
serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
@@ -299,7 +299,7 @@ STAGE PLANS:
columns.types string:string
file.inputformat org.apache.hadoop.hive.ql.io.RCFileInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.RCFileOutputFormat
- location pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/srcpartbucket
+ location pfile:/Users/krishnak/Projects/hdp/sources/hive-git-apache/build/ql/test/data/warehouse/srcpartbucket
name default.srcpartbucket
numFiles 16
numPartitions 4
@@ -308,8 +308,8 @@ STAGE PLANS:
serialization.ddl struct srcpartbucket { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
- totalSize 2748
- transient_lastDdlTime 1297386150
+ totalSize 3244
+ transient_lastDdlTime 1301308525
serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
name: default.srcpartbucket
name: default.srcpartbucket
@@ -333,9 +333,9 @@ STAGE PLANS:
File Output Operator
compressed: false
GlobalTableId: 0
- directory: file:/tmp/sdong/hive_2011-02-10_17-02-30_127_2856183230982405624/-ext-10001
+ directory: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-35-25_778_4151232483661284346/-ext-10001
NumFilesPerFileSink: 1
- Stats Publishing Key Prefix: file:/tmp/sdong/hive_2011-02-10_17-02-30_127_2856183230982405624/-ext-10001/
+ Stats Publishing Key Prefix: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-35-25_778_4151232483661284346/-ext-10001/
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -358,14 +358,14 @@ PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
-PREHOOK: Output: file:/tmp/sdong/hive_2011-02-10_17-02-31_052_5780136652498214851/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-35-26_823_381668417196391502/-mr-10000
POSTHOOK: query: select ds, count(1) from srcpartbucket tablesample (bucket 1 out of 4 on key) where ds is not null group by ds
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
-POSTHOOK: Output: file:/tmp/sdong/hive_2011-02-10_17-02-31_052_5780136652498214851/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-35-26_823_381668417196391502/-mr-10000
POSTHOOK: Lineage: srcpartbucket PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: srcpartbucket PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: srcpartbucket PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
@@ -382,14 +382,14 @@ PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
-PREHOOK: Output: file:/tmp/sdong/hive_2011-02-10_17-02-35_963_7688294481780262172/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-35-36_537_2262690193138337942/-mr-10000
POSTHOOK: query: select ds, count(1) from srcpartbucket tablesample (bucket 1 out of 2 on key) where ds is not null group by ds
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
-POSTHOOK: Output: file:/tmp/sdong/hive_2011-02-10_17-02-35_963_7688294481780262172/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-35-36_537_2262690193138337942/-mr-10000
POSTHOOK: Lineage: srcpartbucket PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: srcpartbucket PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: srcpartbucket PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
@@ -406,14 +406,14 @@ PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
-PREHOOK: Output: file:/tmp/sdong/hive_2011-02-10_17-02-42_219_2445297351151986459/-mr-10000
+PREHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-35-50_075_1096572952564273976/-mr-10000
POSTHOOK: query: select * from srcpartbucket where ds is not null
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
-POSTHOOK: Output: file:/tmp/sdong/hive_2011-02-10_17-02-42_219_2445297351151986459/-mr-10000
+POSTHOOK: Output: file:/var/folders/67/67R3POPtF90VG63KSmCbcU++F0U/-Tmp-/krishnak/hive_2011-03-28_03-35-50_075_1096572952564273976/-mr-10000
POSTHOOK: Lineage: srcpartbucket PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: srcpartbucket PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: srcpartbucket PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]