diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java
index 7bbc4bccc48..4fb78603d68 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java
@@ -965,6 +965,11 @@ private void testCompactionDb(CompactionType compactionType, String resultDirNam
     Assert.assertEquals(expectedData, actualData);
   }
 
+  @Test public void testVectorizationOff() throws Exception {
+    conf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false);
+    testMinorCompactionAfterMajor();
+  }
+
   /**
    * Verify that the expected number of transactions have run, and their state is "succeeded".
    *
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMmCompactorOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMmCompactorOnTez.java
index 2c717b73d22..451390a6682 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMmCompactorOnTez.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMmCompactorOnTez.java
@@ -570,6 +570,11 @@ private void testMmCompactionDb(CompactionType compactionType, String resultDirN
     Assert.assertEquals(expectedData, actualData);
   }
 
+  @Test public void testVectorizationOff() throws Exception {
+    conf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false);
+    testMmMinorCompactionAfterMajor();
+  }
+
   /**
    * Verify that the expected number of transactions have run, and their state is "succeeded".
    *
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 1b84ba29a6c..22a24f8e747 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -962,7 +962,7 @@ public void process(Object row, int tag) throws HiveException {
       }
     } else {
       if (conf.isCompactionTable()) {
-        int bucketProperty = ((IntWritable)((Object[])row)[2]).get();
+        int bucketProperty = getBucketProperty(row);
         bucketId = BucketCodec.determineVersion(bucketProperty).decodeWriterId(bucketProperty);
       }
       createBucketFiles(fsp);
@@ -1684,4 +1684,23 @@ public void configureJobConf(JobConf job) {
       job.setBoolean(Utilities.ENSURE_OPERATORS_EXECUTED, true);
     }
   }
+
+  /**
+   * Reads the bucket property from element 2 of the row array. This is necessary
+   * because VectorFileSinkOperator wraps row values in Writable objects, while the
+   * non-vectorized path supplies a plain (boxed) Integer.
+   * @param row the row as Object; expected to be an Object[] whose element at
+   *            index 2 holds the bucket property
+   * @return bucket property as int
+   */
+  private int getBucketProperty(Object row) {
+    Object bucketProperty = ((Object[]) row)[2];
+    // Test the concrete type we cast to: a broader "instanceof Writable" guard
+    // would allow a ClassCastException for any non-IntWritable Writable value.
+    if (bucketProperty instanceof IntWritable) {
+      return ((IntWritable) bucketProperty).get();
+    } else {
+      return (int) bucketProperty;
+    }
+  }
 }