Index: ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java (revision 1125883)
+++ ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java (working copy)
@@ -61,7 +61,7 @@
* RCFiles, short of Record Columnar File, are flat files
* consisting of binary key/value pairs, which shares much similarity with
* SequenceFile.
- *
+ *
* RCFile stores columns of a table in a record columnar way. It first
* partitions rows horizontally into row splits. and then it vertically
* partitions each row split in a columnar way. RCFile first stores the meta
@@ -75,7 +75,7 @@
* RCFile provides {@link Writer}, {@link Reader} and classes for
* writing, reading respectively.
*
 * RCFile stores columns of a table in a record columnar way. It first
 * partitions rows horizontally into row splits. and then it vertically
@@ -83,21 +83,21 @@
 * data of a row split, as the key part of a record, and all the data of a row
 * split as the value part.
 *
- * 
+ *
 *
 * RCFile compresses values in a more fine-grained manner then record level
 * compression. However, It currently does not support compress the key part
 * yet. The actual compression algorithm used to compress key and/or values can
 * be specified by using the appropriate {@link CompressionCodec}.
 *
- * 
+ *
 *
 * The {@link Reader} is used to read and explain the bytes of RCFile.
 *
- * 
+ *
 *
 * The position passed must be a position returned by
 * {@link RCFile.Writer#getLength()} when writing this file. To seek to an
@@ -1258,7 +1258,7 @@
 /**
 * Read and return the next record length, potentially skipping over a sync
 * block.
- * 
+ *
 * @return the length of the next record or -1 if there is no next record
 * @throws IOException
 */
@@ -1355,7 +1355,7 @@
 currentValue.readFields(in);
 currentValue.inited = true;
 }
- 
+
 public boolean nextBlock() throws IOException {
 int keyLength = nextKeyBuffer();
 if(keyLength > 0) {
@@ -1378,7 +1378,7 @@
 * Calling getColumn() with not change the result of
 * {@link #next(LongWritable)} and
 * {@link #getCurrentRow(BytesRefArrayWritable)}.
- * 
+ *
 * @param columnID
 * @throws IOException
 */
@@ -1423,7 +1423,7 @@
 * current value buffer. It will influence the result of
 * {@link #next(LongWritable)} and
 * {@link #getCurrentRow(BytesRefArrayWritable)}
- * 
+ *
 * @return whether there still has records or not
 * @throws IOException
 */
@@ -1438,7 +1438,7 @@
 * of rows passed by, because {@link #seek(long)},
 * {@link #nextColumnsBatch()} can change the underlying key buffer and
 * value buffer.
- * 
+ *
 * @return next row number
 * @throws IOException
 */
@@ -1472,7 +1472,7 @@
 /**
 * get the current row used,make sure called {@link #next(LongWritable)}
 * first.
- * 
+ *
 * @throws IOException
 */
 public synchronized void getCurrentRow(BytesRefArrayWritable ret) throws IOException {
@@ -1546,7 +1546,7 @@
 public String toString() {
 return file.toString();
 }
- 
+
 public boolean isCompressedRCFile() {
 return this.decompress;
 }
@@ -1569,7 +1569,7 @@
 public KeyBuffer getCurrentKeyBufferObj() {
 return this.currentKey;
 }
- 
+
 /**
 * return the ValueBuffer object used in the reader. Internally in each
 * reader, there is only one ValueBuffer object, which gets reused for every
@@ -1578,7 +1578,7 @@
 public ValueBuffer getCurrentValueBufferObj() {
 return this.currentValue;
 }
- 
+
 //return the current block's length
 public int getCurrentBlockLength() {
 return this.currentRecordLength;
@@ -1593,11 +1593,11 @@
 public int getCurrentCompressedKeyLen() {
 return this.compressedKeyLen;
 }
- 
+
 //return the CompressionCodec used for this file
 public CompressionCodec getCompressionCodec() {
 return this.codec;
 }
- 
+
 }
}
Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLike.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLike.java (revision 1125883)
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLike.java (working copy)
@@ -91,16 +91,16 @@
 * string in it for later pattern matching if it is a simple pattern.
 *
* Examples:
- *
+ *
*
* parseSimplePattern("%abc%") changes {@link #type} to PatternType.MIDDLE
* and changes {@link #simplePattern} to "abc"
* parseSimplePattern("%ab_c%") changes {@link #type} to PatternType.COMPLEX
* and does not change {@link #simplePattern}
*
- *
+ *
*
- *
+ *
* @param likePattern
* the input LIKE query pattern
*/
@@ -109,7 +109,7 @@
int beginIndex = 0;
int endIndex = length;
char lastChar = 'a';
- String strPattern = new String();
+ StringBuilder strPattern = new StringBuilder();
type = PatternType.NONE;
for (int i = 0; i < length; i++) {
@@ -119,7 +119,7 @@
type = PatternType.COMPLEX;
return;
} else { // such as "abc\%de%"
- strPattern += likePattern.substring(beginIndex, i - 1);
+ strPattern.append(likePattern.substring(beginIndex, i - 1));
beginIndex = i;
}
} else if (n == '%') {
@@ -131,7 +131,7 @@
type = PatternType.COMPLEX;
return;
} else { // such as "abc\%de%"
- strPattern += likePattern.substring(beginIndex, i - 1);
+ strPattern.append(likePattern.substring(beginIndex, i - 1));
beginIndex = i;
}
} else {
@@ -143,7 +143,7 @@
type = PatternType.BEGIN; // such as "abc%"
}
} else { // such as "abc\%"
- strPattern += likePattern.substring(beginIndex, i - 1);
+ strPattern.append(likePattern.substring(beginIndex, i - 1));
beginIndex = i;
endIndex = length;
}
@@ -152,8 +152,8 @@
lastChar = n;
}
- strPattern += likePattern.substring(beginIndex, endIndex);
- simplePattern.set(strPattern);
+ strPattern.append(likePattern.substring(beginIndex, endIndex));
+ simplePattern.set(strPattern.toString());
}
private static boolean find(Text s, Text sub, int startS, int endS) {