commit 6adf8afdffdacb3478ef5a7592193adf8037800a
Author: Ivan Suller
Date:   Fri Jun 21 15:07:32 2019 +0200

    HIVE-21905

    Change-Id: Id811bd7341d9f67a5ff1414590b5e867049c9578

diff --git a/itests/hive-unit/.gitignore b/itests/hive-unit/.gitignore
new file mode 100644
index 0000000000..ae3c172604
--- /dev/null
+++ b/itests/hive-unit/.gitignore
@@ -0,0 +1 @@
+/bin/
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
index 3550747c16..c8fa1b97dd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
@@ -214,7 +214,6 @@ public void setWork(FetchWork work) {
    */
   private static final Map<Class, InputFormat> inputFormats = new HashMap<Class, InputFormat>();
 
-  @SuppressWarnings("unchecked")
   public static InputFormat<WritableComparable, Writable> getInputFormatFromCache(
       Class<? extends InputFormat> inputFormatClass, Configuration conf) throws IOException {
     if (Configurable.class.isAssignableFrom(inputFormatClass) ||
@@ -319,7 +318,7 @@ static void setFetchOperatorContext(JobConf conf, List<Path> paths) {
 
   private RecordReader<WritableComparable, Writable> getRecordReader() throws Exception {
     if (!iterSplits.hasNext()) {
-      FetchInputFormatSplit[] splits = getNextSplits();
+      List<FetchInputFormatSplit> splits = getNextSplits();
       if (splits == null) {
         return null;
       }
@@ -335,7 +334,7 @@ static void setFetchOperatorContext(JobConf conf, List<Path> paths) {
       if (isPartitioned) {
         row[1] = createPartValue(currDesc, partKeyOI);
       }
-      iterSplits = Arrays.asList(splits).iterator();
+      iterSplits = splits.iterator();
 
       if (LOG.isDebugEnabled()) {
         LOG.debug("Creating fetchTask with deserializer typeinfo: "
@@ -348,7 +347,6 @@ static void setFetchOperatorContext(JobConf conf, List<Path> paths) {
 
     final FetchInputFormatSplit target = iterSplits.next();
 
-    @SuppressWarnings("unchecked")
     final RecordReader<WritableComparable, Writable> reader = target.getRecordReader(job);
     if (hasVC || work.getSplitSample() != null) {
       currRecReader = new HiveRecordReader<WritableComparable, Writable>(reader, job) {
@@ -374,7 +372,7 @@ public boolean doNext(WritableComparable key, Writable value) throws IOException
     return currRecReader;
   }
 
-  protected FetchInputFormatSplit[] getNextSplits() throws Exception {
+  private List<FetchInputFormatSplit> getNextSplits() throws Exception {
     while (getNextPath()) {
       // not using FileInputFormat.setInputPaths() here because it forces a connection to the
       // default file system - which may or may not be online during pure metadata operations
@@ -437,7 +435,7 @@ public boolean doNext(WritableComparable key, Writable value) throws IOException
       if (HiveConf.getBoolVar(job, HiveConf.ConfVars.HIVE_IN_TEST)) {
         Collections.sort(inputSplits, new FetchInputFormatSplitComparator());
       }
-      return inputSplits.toArray(new FetchInputFormatSplit[inputSplits.size()]);
+      return inputSplits;
     }
 
     return null;
@@ -663,7 +661,6 @@ public void closeOperator() throws HiveException {
    */
   public void setupContext(List<Path> paths) {
     this.iterPath = paths.iterator();
-    List<PartitionDesc> partitionDescs;
     if (!isPartitioned) {
       this.iterPartDesc = Iterators.cycle(new PartitionDesc(work.getTblDesc(), null));
     } else {
@@ -689,7 +686,6 @@ private StructObjectInspector setupOutputObjectInspector() throws HiveException
     }
 
     partKeyOI = getPartitionKeyOI(tableDesc);
-    PartitionDesc partDesc = new PartitionDesc(tableDesc, null);
     List<PartitionDesc> listParts = work.getPartDesc();
     // Chose the table descriptor if none of the partitions is present.
    // For eg: consider the query:
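Review note: the getNextSplits() change above swaps the FetchInputFormatSplit[] return for a List, which also removes the toArray(...) copy on the producer side and the Arrays.asList(...) wrapper on the consumer side. A minimal compilable sketch of the motivation; Split and the method names are illustrative stand-ins, not code from the patch:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

class SplitListSketch {
  static class Split {} // hypothetical stand-in for FetchInputFormatSplit

  // Before: the producer copies the list into an array, and the consumer
  // immediately wraps it back into a collection to iterate it.
  static Iterator<Split> arrayStyle(List<Split> collected) {
    Split[] splits = collected.toArray(new Split[collected.size()]);
    return Arrays.asList(splits).iterator();
  }

  // After: the list is returned as-is and iterated directly - one copy fewer,
  // and no array/generics friction if the element type ever gains a parameter.
  static Iterator<Split> listStyle(List<Split> collected) {
    return collected.iterator();
  }

  public static void main(String[] args) {
    List<Split> splits = new ArrayList<>();
    splits.add(new Split());
    System.out.println(arrayStyle(splits).hasNext()); // true
    System.out.println(listStyle(splits).hasNext());  // true
  }
}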
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FooterBuffer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FooterBuffer.java
index be88dad078..8ead7975e5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FooterBuffer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FooterBuffer.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.util.ReflectionUtils;
 
 public class FooterBuffer {
-  private ArrayList<ObjectPair> buffer;
+  private ArrayList<ObjectPair<WritableComparable, Writable>> buffer;
   private int cur;
 
   public FooterBuffer() {
@@ -64,13 +64,13 @@ public boolean initializeBuffer(JobConf job, RecordReader recordreader,
       int footerCount, WritableComparable key, Writable value) throws IOException {
 
     // Fill the buffer with key value pairs.
-    this.buffer = new ArrayList<ObjectPair>();
+    this.buffer = new ArrayList<>();
     while (buffer.size() < footerCount) {
       boolean notEOF = recordreader.next(key, value);
       if (!notEOF) {
         return false;
       }
-      ObjectPair tem = new ObjectPair();
+      ObjectPair<WritableComparable, Writable> tem = new ObjectPair<>();
       tem.setFirst(ReflectionUtils.copy(job, key, tem.getFirst()));
       tem.setSecond(ReflectionUtils.copy(job, value, tem.getSecond()));
       buffer.add(tem);
@@ -98,8 +98,8 @@ public boolean initializeBuffer(JobConf job, RecordReader recordreader,
    */
   public boolean updateBuffer(JobConf job, RecordReader recordreader,
       WritableComparable key, Writable value) throws IOException {
-    key = ReflectionUtils.copy(job, (WritableComparable)buffer.get(cur).getFirst(), key);
-    value = ReflectionUtils.copy(job, (Writable)buffer.get(cur).getSecond(), value);
+    key = ReflectionUtils.copy(job, buffer.get(cur).getFirst(), key);
+    value = ReflectionUtils.copy(job, buffer.get(cur).getSecond(), value);
     boolean notEOF = recordreader.next(buffer.get(cur).getFirst(), buffer.get(cur).getSecond());
     if (notEOF) {
       cur = (++cur) % buffer.size();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index d91cd60668..bc75ec0b56 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -183,7 +183,6 @@
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.DefaultCodec;
 import org.apache.hadoop.mapred.FileInputFormat;
@@ -3823,8 +3822,8 @@ public static void clearWorkMap(Configuration conf) {
    * @return Return true if there are 0 or more records left in the file
    *         after skipping all headers, otherwise return false.
    */
-  public static boolean skipHeader(RecordReader<WritableComparable, Writable> currRecReader,
-      int headerCount, WritableComparable key, Writable value) throws IOException {
+  public static <K, V> boolean skipHeader(RecordReader<K, V> currRecReader, int headerCount, K key, V value)
+      throws IOException {
     while (headerCount > 0) {
       if (!currRecReader.next(key, value)) {
         return false;
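Review note: Utilities.skipHeader is now generic in K and V, which is what lets the caller in HiveContextAwareRecordReader (next file) drop its (WritableComparable)/(Writable) casts. A compilable sketch of the same pattern; Reader is a hypothetical stand-in for org.apache.hadoop.mapred.RecordReader, and the decrement is implied by the loop shown in the diff context:

import java.io.IOException;

interface Reader<K, V> { // stand-in for org.apache.hadoop.mapred.RecordReader
  boolean next(K key, V value) throws IOException;
}

class SkipHeaderSketch {
  // K and V flow from the reader to the key/value parameters, so call sites
  // compile with no casts regardless of the concrete Writable types.
  static <K, V> boolean skipHeader(Reader<K, V> reader, int headerCount, K key, V value)
      throws IOException {
    while (headerCount > 0) {
      if (!reader.next(key, value)) {
        return false; // file ended while still inside the header
      }
      headerCount--;
    }
    return true;
  }
}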
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
index 0287bd363f..7f3ef37258 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
@@ -54,7 +54,8 @@
  * a binary search to find the block to begin reading from, and stop reading once it can be
  * determined no other entries will match the filter.
  */
-public abstract class HiveContextAwareRecordReader<K, V> implements RecordReader<K, V> {
+public abstract class HiveContextAwareRecordReader<K extends WritableComparable, V extends Writable>
+    implements RecordReader<K, V> {
 
   private static final Logger LOG = LoggerFactory.getLogger(HiveContextAwareRecordReader.class.getName());
 
@@ -68,7 +69,7 @@
   private final List<Comparison> stopComparisons = new ArrayList<Comparison>();
   private Map<Path, PartitionDesc> pathToPartitionInfo;
 
-  protected RecordReader recordReader;
+  protected RecordReader<K, V> recordReader;
   protected JobConf jobConf;
   protected boolean isSorted = false;
 
@@ -76,17 +77,17 @@
   public HiveContextAwareRecordReader(JobConf conf) throws IOException {
     this(null, conf);
   }
 
-  public HiveContextAwareRecordReader(RecordReader recordReader) {
+  public HiveContextAwareRecordReader(RecordReader<K, V> recordReader) {
     this.recordReader = recordReader;
   }
 
-  public HiveContextAwareRecordReader(RecordReader recordReader, JobConf conf)
+  public HiveContextAwareRecordReader(RecordReader<K, V> recordReader, JobConf conf)
       throws IOException {
     this.recordReader = recordReader;
     this.jobConf = conf;
   }
 
-  public void setRecordReader(RecordReader recordReader) {
+  public void setRecordReader(RecordReader<K, V> recordReader) {
     this.recordReader = recordReader;
   }
 
@@ -345,12 +346,12 @@ public boolean doNext(K key, V value) throws IOException {
         }
 
         // If input contains header, skip header.
-        if (!Utilities.skipHeader(recordReader, headerCount, (WritableComparable)key, (Writable)value)) {
+        if (!Utilities.skipHeader(recordReader, headerCount, key, value)) {
           return false;
         }
         if (footerCount > 0) {
           footerBuffer = new FooterBuffer();
-          if (!footerBuffer.initializeBuffer(jobConf, recordReader, footerCount, (WritableComparable)key, (Writable)value)) {
+          if (!footerBuffer.initializeBuffer(jobConf, recordReader, footerCount, key, value)) {
             return false;
           }
         }
@@ -360,7 +361,7 @@ public boolean doNext(K key, V value) throws IOException {
         // Table files don't have footer rows.
         return recordReader.next(key, value);
       } else {
-        return footerBuffer.updateBuffer(jobConf, recordReader, (WritableComparable)key, (Writable)value);
+        return footerBuffer.updateBuffer(jobConf, recordReader, key, value);
       }
     } catch (Exception e) {
       return HiveIOExceptionHandlerUtil.handleRecordReaderNextException(e, jobConf);
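Review note: the class declaration appears to add bounds on its type parameters (K extends WritableComparable, V extends Writable), and that is what makes the now cast-free calls into FooterBuffer compile, since initializeBuffer and updateBuffer are still declared against the supertypes. A small sketch of the idea, assuming hadoop-common on the classpath; BoundedReaderSketch, helper, and doNextSketch are illustrative names, not Hive APIs:

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;

abstract class BoundedReaderSketch<K extends WritableComparable, V extends Writable> {

  // Stand-in for a helper declared against the supertypes, like
  // FooterBuffer.initializeBuffer(..., WritableComparable key, Writable value).
  void helper(WritableComparable key, Writable value) {
    // no-op; only the signature matters for the example
  }

  void doNextSketch(K key, V value) {
    // With unbounded K and V this call would need (WritableComparable)/(Writable)
    // casts; the bounds make it compile directly.
    helper(key, value);
  }
}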
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveRecordReader.java
index 3864060233..359184a6d2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveRecordReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveRecordReader.java
@@ -35,12 +35,12 @@
 
-  public HiveRecordReader(RecordReader recordReader)
+  public HiveRecordReader(RecordReader<K, V> recordReader)
       throws IOException {
     super(recordReader);
   }
 
-  public HiveRecordReader(RecordReader recordReader, JobConf conf)
+  public HiveRecordReader(RecordReader<K, V> recordReader, JobConf conf)
       throws IOException {
     super(recordReader, conf);
   }
 
@@ -50,14 +50,17 @@ public void doClose() throws IOException {
     recordReader.close();
   }
 
+  @Override
   public K createKey() {
-    return (K) recordReader.createKey();
+    return recordReader.createKey();
   }
 
+  @Override
   public V createValue() {
-    return (V) recordReader.createValue();
+    return recordReader.createValue();
  }
 
+  @Override
   public long getPos() throws IOException {
     return recordReader.getPos();
   }
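Review note: besides the typed constructors, this file adds @Override to the delegating accessors, so an accidental signature drift becomes a compile error instead of a silent overload, and the typed recordReader field lets the (K)/(V) casts go. The pattern, sketched with hypothetical names rather than the Hive classes:

interface Source<K> { // stand-in for the RecordReader side of the delegation
  K createKey();
}

class DelegatingSource<K> implements Source<K> {
  private final Source<K> inner;

  DelegatingSource(Source<K> inner) {
    this.inner = inner;
  }

  @Override // compiler verifies this really overrides Source.createKey()
  public K createKey() {
    return inner.createKey(); // typed delegation: no (K) cast needed
  }
}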
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java
index 1e46c60df9..3c53878fef 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java
@@ -59,7 +59,7 @@
   private RCFileRecordReader rcfReader;
   private JobConf conf;
   private TestHiveInputSplit hiveSplit;
-  private HiveContextAwareRecordReader hbsReader;
+  private HiveContextAwareRecordReader<WritableComparable, Writable> hbsReader;
   private IOContext ioContext;
 
   private static class TestHiveInputSplit extends HiveInputSplit {
@@ -148,52 +148,52 @@ private void init() throws IOException {
     hbsReader.initIOContext(hiveSplit, conf, Class.class, rcfReader);
   }
 
-  private boolean executeDoNext(HiveContextAwareRecordReader hbsReader) throws IOException {
+  private boolean executeDoNext() throws IOException {
     return hbsReader.next(hbsReader.createKey(), hbsReader.createValue());
   }
 
   public void testNonLinearGreaterThan() throws Exception {
     init();
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     verify(rcfReader).sync(50);
 
     ioContext.setComparison(1);
     when(rcfReader.getPos()).thenReturn(25L);
 
     // By setting the comparison to greater, the search should use the block [0, 50]
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     verify(rcfReader).sync(25);
   }
 
   public void testNonLinearLessThan() throws Exception {
     init();
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     verify(rcfReader).sync(50);
 
     ioContext.setComparison(-1);
     when(rcfReader.getPos()).thenReturn(75L);
 
     // By setting the comparison to less, the search should use the block [50, 100]
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     verify(rcfReader).sync(75);
   }
 
   public void testNonLinearEqualTo() throws Exception {
     init();
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     verify(rcfReader).sync(50);
 
     ioContext.setComparison(0);
     when(rcfReader.getPos()).thenReturn(25L);
 
     // By setting the comparison to equal, the search should use the block [0, 50]
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     verify(rcfReader).sync(25);
   }
 
   public void testHitLastBlock() throws Exception {
     init();
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     verify(rcfReader).sync(50);
 
     ioContext.setComparison(-1);
@@ -202,7 +202,7 @@ public void testHitLastBlock() throws Exception {
     // When sync is called it will return 100, the value signaling the end of the file, this should
     // result in a call to sync to the beginning of the block it was searching [50, 100], and it
     // should continue normally
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     InOrder inOrder = inOrder(rcfReader);
     inOrder.verify(rcfReader).sync(75);
     inOrder.verify(rcfReader).sync(50);
@@ -211,14 +211,14 @@ public void testHitSamePositionTwice() throws Exception {
     init();
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     verify(rcfReader).sync(50);
 
     ioContext.setComparison(1);
 
     // When getPos is called it should return the same value, signaling the end of the search, so
     // the search should continue linearly and it should sync to the beginning of the block [0, 50]
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     InOrder inOrder = inOrder(rcfReader);
     inOrder.verify(rcfReader).sync(25);
     inOrder.verify(rcfReader).sync(0);
@@ -228,20 +228,20 @@ public void testResetRange() throws Exception {
     init();
     InOrder inOrder = inOrder(rcfReader);
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     inOrder.verify(rcfReader).sync(50);
 
     ioContext.setComparison(-1);
     when(rcfReader.getPos()).thenReturn(75L);
 
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     inOrder.verify(rcfReader).sync(75);
 
     ioContext.setEndBinarySearch(true);
 
     // This should make the search linear, sync to the beginning of the block being searched
     // [50, 100], set the comparison to be null, and the flag to reset the range should be unset
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     inOrder.verify(rcfReader).sync(50);
     Assert.assertFalse(ioContext.isBinarySearching());
     Assert.assertFalse(ioContext.shouldEndBinarySearch());
@@ -251,68 +251,68 @@ public void testEqualOpClass() throws Exception {
     init();
     ioContext.setGenericUDFClassName(GenericUDFOPEqual.class.getName());
     Assert.assertTrue(ioContext.isBinarySearching());
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     ioContext.setBinarySearching(false);
     ioContext.setComparison(-1);
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     ioContext.setComparison(0);
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     ioContext.setComparison(1);
-    Assert.assertFalse(executeDoNext(hbsReader));
+    Assert.assertFalse(executeDoNext());
   }
 
   public void testLessThanOpClass() throws Exception {
     init();
     ioContext.setGenericUDFClassName(GenericUDFOPLessThan.class.getName());
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     Assert.assertFalse(ioContext.isBinarySearching());
     ioContext.setComparison(-1);
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     ioContext.setComparison(0);
-    Assert.assertFalse(executeDoNext(hbsReader));
+    Assert.assertFalse(executeDoNext());
     ioContext.setComparison(1);
-    Assert.assertFalse(executeDoNext(hbsReader));
+    Assert.assertFalse(executeDoNext());
   }
 
   public void testLessThanOrEqualOpClass() throws Exception {
     init();
     ioContext.setGenericUDFClassName(GenericUDFOPEqualOrLessThan.class.getName());
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     Assert.assertFalse(ioContext.isBinarySearching());
     ioContext.setComparison(-1);
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     ioContext.setComparison(0);
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     ioContext.setComparison(1);
-    Assert.assertFalse(executeDoNext(hbsReader));
+    Assert.assertFalse(executeDoNext());
   }
 
   public void testGreaterThanOpClass() throws Exception {
     init();
     ioContext.setGenericUDFClassName(GenericUDFOPGreaterThan.class.getName());
     Assert.assertTrue(ioContext.isBinarySearching());
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     ioContext.setBinarySearching(false);
     ioContext.setComparison(-1);
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     ioContext.setComparison(0);
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     ioContext.setComparison(1);
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
   }
 
   public void testGreaterThanOrEqualOpClass() throws Exception {
     init();
     ioContext.setGenericUDFClassName(GenericUDFOPEqualOrGreaterThan.class.getName());
     Assert.assertTrue(ioContext.isBinarySearching());
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
    ioContext.setBinarySearching(false);
     ioContext.setComparison(-1);
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     ioContext.setComparison(0);
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
     ioContext.setComparison(1);
-    Assert.assertTrue(executeDoNext(hbsReader));
+    Assert.assertTrue(executeDoNext());
   }
 
   public static void main(String[] args) throws Exception {
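Review note: in the test, executeDoNext previously took a parameter that shadowed the hbsReader field, and every caller passed the field back in; reading the field directly removes the redundant argument at every call site. The pattern, sketched with hypothetical names unrelated to the Hive test:

class FieldOverParameterSketch {
  private final StringBuilder reader = new StringBuilder("state"); // stand-in for the hbsReader field

  // Before: private boolean executeDoNext(StringBuilder reader) { ... }
  // shadowed the field; after, the helper uses the field directly.
  private boolean executeDoNext() {
    return reader.length() > 0;
  }

  public static void main(String[] args) {
    System.out.println(new FieldOverParameterSketch().executeDoNext()); // true
  }
}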