diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 7932a3d..ab2998e 100644 --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -518,6 +518,8 @@ HIVETESTMODEPREFIX("hive.test.mode.prefix", "test_"), HIVETESTMODESAMPLEFREQ("hive.test.mode.samplefreq", 32), HIVETESTMODENOSAMPLE("hive.test.mode.nosamplelist", ""), + HIVETESTMODEDUMMYSTATAGGR("hive.test.dummystats.aggregator", ""), + HIVETESTMODEDUMMYSTATPUB("hive.test.dummystats.publisher", ""), HIVEMERGEMAPFILES("hive.merge.mapfiles", true), HIVEMERGEMAPREDFILES("hive.merge.mapredfiles", false), @@ -533,6 +535,11 @@ HIVEUSEEXPLICITRCFILEHEADER("hive.exec.rcfile.use.explicit.header", true), HIVEUSERCFILESYNCCACHE("hive.exec.rcfile.use.sync.cache", true), + HIVE_RCFILE_RECORD_INTERVAL("hive.io.rcfile.record.interval", Integer.MAX_VALUE), + HIVE_RCFILE_COLUMN_NUMBER_CONF("hive.io.rcfile.column.number.conf", 0), + HIVE_RCFILE_TOLERATE_CORRUPTIONS("hive.io.rcfile.tolerate.corruptions", false), + HIVE_RCFILE_RECORD_BUFFER_SIZE("hive.io.rcfile.record.buffer.size", 4194304), // 4M + // Maximum fraction of heap that can be used by ORC file writers HIVE_ORC_FILE_MEMORY_POOL("hive.exec.orc.memory.pool", 0.5f), // 50% // Define the version of the file to write @@ -779,6 +786,7 @@ // For HBase storage handler HIVE_HBASE_WAL_ENABLED("hive.hbase.wal.enabled", true), + HIVE_HBASE_GENERATE_HFILES("hive.hbase.generatehfiles", false), // For har files HIVEARCHIVEENABLED("hive.archive.enabled", false), @@ -836,6 +844,8 @@ HIVE_ERROR_ON_EMPTY_PARTITION("hive.error.on.empty.partition", false), + HIVE_INDEX_COMPACT_FILE("hive.index.compact.file", ""), // internal variable + HIVE_INDEX_BLOCKFILTER_FILE("hive.index.blockfilter.file", ""), // internal variable HIVE_INDEX_IGNORE_HDFS_LOC("hive.index.compact.file.ignore.hdfs", false), HIVE_EXIM_URI_SCHEME_WL("hive.exim.uri.scheme.whitelist", "hdfs,pfile"), @@ -1421,6 +1431,31 @@ private void initialize(Class cls) { setBoolVar(ConfVars.METASTORE_FIXED_DATASTORE, true); } + if (getBoolVar(HiveConf.ConfVars.HIVECONFVALIDATION)) { + List<String> trimmed = new ArrayList<String>(); + for (Map.Entry<String, String> entry : this) { + String key = entry.getKey(); + if (key == null || !key.startsWith("hive.")) { + continue; + } + ConfVars var = HiveConf.getConfVars(key); + if (var == null) { + var = HiveConf.getConfVars(key.trim()); + if (var != null) { + trimmed.add(key); + } + } + if (var == null) { + l4j.warn("HiveConf of name " + key + " does not exist"); + } else if (!var.isType(entry.getValue())) { + l4j.warn("HiveConf " + var.varname + " expects " + var.typeString() + " type value"); + } + } + for (String key : trimmed) { + set(key.trim(), getRaw(key)); + unset(key); + } + } // setup list of conf vars that are not allowed to change runtime setupRestrictList(); } diff --git hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java index 7b91e1d..2a1c0be 100644 --- hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java +++ hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.hbase.ColumnMappings.ColumnMapping; import
org.apache.hadoop.hive.metastore.HiveMetaHook; import org.apache.hadoop.hive.metastore.MetaStoreUtils; @@ -373,7 +374,7 @@ public void configureTableJobProperties( * online table. This mode is implicitly applied when "hive.hbase.completebulkload" is true. */ public static boolean isHBaseGenerateHFiles(Configuration conf) { - return conf.getBoolean("hive.hbase.generatehfiles", false); + return HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_HBASE_GENERATE_HFILES); } /** diff --git hbase-handler/src/test/queries/positive/hbase_stats.q hbase-handler/src/test/queries/positive/hbase_stats.q index 52efef5..3350dde 100644 --- hbase-handler/src/test/queries/positive/hbase_stats.q +++ hbase-handler/src/test/queries/positive/hbase_stats.q @@ -1,6 +1,6 @@ set datanucleus.cache.collections=false; set hive.stats.autogather=true; -set hive.ststs.atomic=false; +set hive.stats.atomic=false; set hive.stats.dbclass=hbase; diff --git hbase-handler/src/test/queries/positive/hbase_stats2.q hbase-handler/src/test/queries/positive/hbase_stats2.q index 520e003..f6c71c3 100644 --- hbase-handler/src/test/queries/positive/hbase_stats2.q +++ hbase-handler/src/test/queries/positive/hbase_stats2.q @@ -1,7 +1,7 @@ set datanucleus.cache.collections=false; set hive.stats.autogather=true; set hive.stats.atomic=false; -set hive.stats.collect.uncompressedsize=false; +set hive.stats.collect.rawdatasize=false; set hive.stats.dbclass=hbase; diff --git hbase-handler/src/test/queries/positive/hbase_stats3.q hbase-handler/src/test/queries/positive/hbase_stats3.q index c3134f0..c74fa08 100644 --- hbase-handler/src/test/queries/positive/hbase_stats3.q +++ hbase-handler/src/test/queries/positive/hbase_stats3.q @@ -1,7 +1,7 @@ set datanucleus.cache.collections=false; set hive.stats.autogather=true; set hive.stats.atomic=false; -set hive.stats.collect.uncompressedsize=false; +set hive.stats.collect.rawdatasize=false; create table stats_part like srcpart; diff --git hbase-handler/src/test/results/positive/hbase_stats2.q.out hbase-handler/src/test/results/positive/hbase_stats2.q.out index 80e1c6d..5dcbc5a 100644 --- hbase-handler/src/test/results/positive/hbase_stats2.q.out +++ hbase-handler/src/test/results/positive/hbase_stats2.q.out @@ -45,7 +45,7 @@ Table Parameters: COLUMN_STATS_ACCURATE true numFiles 1 numRows 500 - rawDataSize 5312 + rawDataSize 0 totalSize 5812 #### A masked pattern was here #### @@ -185,7 +185,7 @@ Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 1 numRows 500 - rawDataSize 5312 + rawDataSize 0 totalSize 5812 #### A masked pattern was here #### @@ -227,7 +227,7 @@ Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 1 numRows 500 - rawDataSize 5312 + rawDataSize 0 totalSize 5812 #### A masked pattern was here #### diff --git hbase-handler/src/test/results/positive/hbase_stats3.q.out hbase-handler/src/test/results/positive/hbase_stats3.q.out index ce7dda4..deed6be 100644 --- hbase-handler/src/test/results/positive/hbase_stats3.q.out +++ hbase-handler/src/test/results/positive/hbase_stats3.q.out @@ -45,7 +45,7 @@ Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 1 numRows 500 - rawDataSize 5312 + rawDataSize 0 totalSize 5812 #### A masked pattern was here #### @@ -99,7 +99,7 @@ Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 1 numRows 500 - rawDataSize 5312 + rawDataSize 0 totalSize 5812 #### A masked pattern was here #### @@ -153,7 +153,7 @@ Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 1 numRows 500 - rawDataSize 5312 + rawDataSize 0 totalSize 5812 #### A masked 
pattern was here #### @@ -211,7 +211,7 @@ Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 1 numRows 500 - rawDataSize 5312 + rawDataSize 0 totalSize 5812 #### A masked pattern was here #### @@ -265,7 +265,7 @@ Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 1 numRows 500 - rawDataSize 5312 + rawDataSize 0 totalSize 5812 #### A masked pattern was here #### @@ -319,7 +319,7 @@ Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 1 numRows 500 - rawDataSize 5312 + rawDataSize 0 totalSize 5812 #### A masked pattern was here #### diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/SpecialCases.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/SpecialCases.java index 0c1fa23..d6ae9f2 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/SpecialCases.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/SpecialCases.java @@ -20,6 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.io.RCFile; import org.apache.hadoop.hive.ql.io.RCFileOutputFormat; import org.apache.hadoop.hive.ql.io.orc.OrcFile; @@ -62,7 +63,7 @@ public static void addSpecialCasesParametersToOutputJobProperties( OutputJobInfo jobInfo, Class ofclass) { if (ofclass == RCFileOutputFormat.class) { // RCFile specific parameter - jobProperties.put(RCFile.COLUMN_NUMBER_CONF_STR, + jobProperties.put(HiveConf.ConfVars.HIVE_RCFILE_COLUMN_NUMBER_CONF.varname, Integer.toOctalString( jobInfo.getOutputSchema().getFields().size())); } else if (ofclass == OrcOutputFormat.class) { diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/rcfile/RCFileMapReduceOutputFormat.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/rcfile/RCFileMapReduceOutputFormat.java index b09ab4c..64699f2 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/rcfile/RCFileMapReduceOutputFormat.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/rcfile/RCFileMapReduceOutputFormat.java @@ -24,6 +24,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.io.RCFile; +import org.apache.hadoop.hive.ql.io.RCFileOutputFormat; import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.io.compress.CompressionCodec; @@ -50,7 +51,7 @@ */ public static void setColumnNumber(Configuration conf, int columnNum) { assert columnNum > 0; - conf.setInt(RCFile.COLUMN_NUMBER_CONF_STR, columnNum); + RCFileOutputFormat.setColumnNumber(conf, columnNum); } /* (non-Javadoc) diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java index 9a89980..02a13b3 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java @@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.io.RCFile; import org.apache.hadoop.hive.ql.io.RCFileOutputFormat; import org.apache.hadoop.hive.serde.serdeConstants; @@ -210,7 +211,7 @@ private void writeThenReadByRecordReader(int 
intervalRecordCount, fs.delete(testFile, true); Configuration cloneConf = new Configuration(conf); RCFileOutputFormat.setColumnNumber(cloneConf, bytesArray.length); - cloneConf.setInt(RCFile.RECORD_INTERVAL_CONF_STR, intervalRecordCount); + cloneConf.setInt(HiveConf.ConfVars.HIVE_RCFILE_RECORD_INTERVAL.varname, intervalRecordCount); RCFile.Writer writer = new RCFile.Writer(fs, cloneConf, testFile, null, codec); diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyOverriddenConfigsHook.java itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyOverriddenConfigsHook.java index 41c178a..57baef0 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyOverriddenConfigsHook.java +++ itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyOverriddenConfigsHook.java @@ -34,11 +34,11 @@ */ public class VerifyOverriddenConfigsHook implements ExecuteWithHookContext { - // A config variable set via a System Propery, a config variable set in the CLI, + // A config variable set via a System Property, a config variable set in the CLI, // a config variable not in the default List of config variables, and a config variable in the - // default list of conifg variables, but which has not been overridden + // default list of config variables, but which has not been overridden private static String[] keysArray = - {"mapred.job.tracker", "hive.exec.post.hooks", "hive.config.doesnt.exit", + {"mapred.job.tracker", "hive.exec.post.hooks", "some.hive.config.doesnt.exit", "hive.exec.mode.local.auto"}; private static List keysList = Arrays.asList(keysArray); diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsAggregator.java itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsAggregator.java index 1bafd97..327eabc 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsAggregator.java +++ itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsAggregator.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.stats; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.Task; /** @@ -34,7 +35,7 @@ // This is a test. The parameter hive.test.dummystats.aggregator's value // denotes the method which needs to throw an error. public boolean connect(Configuration hconf, Task sourceTask) { - errorMethod = hconf.get("hive.test.dummystats.aggregator", ""); + errorMethod = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVETESTMODEDUMMYSTATAGGR); if (errorMethod.equalsIgnoreCase("connect")) { return false; } diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsPublisher.java itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsPublisher.java index 4dd632d..1f6e80f 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsPublisher.java +++ itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsPublisher.java @@ -21,6 +21,7 @@ import java.util.Map; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; /** * An test implementation for StatsPublisher. @@ -36,7 +37,7 @@ // This is a test. The parameter hive.test.dummystats.publisher's value // denotes the method which needs to throw an error. 
public boolean init(Configuration hconf) { - errorMethod = hconf.get("hive.test.dummystats.publisher", ""); + errorMethod = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVETESTMODEDUMMYSTATPUB); if (errorMethod.equalsIgnoreCase("init")) { return false; } @@ -45,7 +46,7 @@ public boolean init(Configuration hconf) { } public boolean connect(Configuration hconf) { - errorMethod = hconf.get("hive.test.dummystats.publisher", ""); + errorMethod = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVETESTMODEDUMMYSTATPUB); if (errorMethod.equalsIgnoreCase("connect")) { return false; } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 5e5cf97..29d59a4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -3220,8 +3220,8 @@ public static void setInputAttributes(Configuration conf, MapWork mWork) { HiveConf.setVar(conf, var, mWork.getInputformat()); } if (mWork.getIndexIntermediateFile() != null) { - conf.set("hive.index.compact.file", mWork.getIndexIntermediateFile()); - conf.set("hive.index.blockfilter.file", mWork.getIndexIntermediateFile()); + conf.set(ConfVars.HIVE_INDEX_COMPACT_FILE.varname, mWork.getIndexIntermediateFile()); + conf.set(ConfVars.HIVE_INDEX_BLOCKFILTER_FILE.varname, mWork.getIndexIntermediateFile()); } // Intentionally overwrites anything the user may have put here diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java index 179ad29..a5482ef 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java @@ -551,8 +551,8 @@ protected void setInputAttributes(Configuration conf) { HiveConf.setVar(conf, ConfVars.HIVEINPUTFORMAT, mWork.getInputformat()); } if (mWork.getIndexIntermediateFile() != null) { - conf.set("hive.index.compact.file", mWork.getIndexIntermediateFile()); - conf.set("hive.index.blockfilter.file", mWork.getIndexIntermediateFile()); + conf.set(ConfVars.HIVE_INDEX_COMPACT_FILE.varname, mWork.getIndexIntermediateFile()); + conf.set(ConfVars.HIVE_INDEX_BLOCKFILTER_FILE.varname, mWork.getIndexIntermediateFile()); } // Intentionally overwrites anything the user may have put here diff --git ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java index 3bc7e43..bdbe933 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java @@ -340,15 +340,8 @@ private static final Log LOG = LogFactory.getLog(RCFile.class); - public static final String RECORD_INTERVAL_CONF_STR = "hive.io.rcfile.record.interval"; - public static final String COLUMN_NUMBER_METADATA_STR = "hive.io.rcfile.column.number"; - public static final String COLUMN_NUMBER_CONF_STR = "hive.io.rcfile.column.number.conf"; - - public static final String TOLERATE_CORRUPTIONS_CONF_STR = - "hive.io.rcfile.tolerate.corruptions"; - // HACK: We actually need BlockMissingException, but that is not available // in all hadoop versions. 
public static final String BLOCK_MISSING_MESSAGE = @@ -985,8 +978,8 @@ public Writer(FileSystem fs, Configuration conf, Path name, public Writer(FileSystem fs, Configuration conf, Path name, int bufferSize, short replication, long blockSize, Progressable progress, Metadata metadata, CompressionCodec codec) throws IOException { - RECORD_INTERVAL = conf.getInt(RECORD_INTERVAL_CONF_STR, RECORD_INTERVAL); - columnNumber = conf.getInt(COLUMN_NUMBER_CONF_STR, 0); + RECORD_INTERVAL = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_RCFILE_RECORD_INTERVAL); + columnNumber = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_RCFILE_COLUMN_NUMBER_CONF); if (metadata == null) { metadata = new Metadata(); @@ -1346,8 +1339,8 @@ public Reader(FileSystem fs, Path file, Configuration conf) throws IOException { /** Create a new RCFile reader. */ public Reader(FileSystem fs, Path file, int bufferSize, Configuration conf, long start, long length) throws IOException { - tolerateCorruptions = conf.getBoolean( - TOLERATE_CORRUPTIONS_CONF_STR, false); + tolerateCorruptions = HiveConf.getBoolVar( + conf, HiveConf.ConfVars.HIVE_RCFILE_TOLERATE_CORRUPTIONS); conf.setInt("io.file.buffer.size", bufferSize); this.file = file; in = openFile(fs, file, bufferSize, length); diff --git ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java index 953d9b4..5ec5344 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java @@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable; import org.apache.hadoop.io.Writable; @@ -57,7 +58,7 @@ */ public static void setColumnNumber(Configuration conf, int columnNum) { assert columnNum > 0; - conf.setInt(RCFile.COLUMN_NUMBER_CONF_STR, columnNum); + conf.setInt(HiveConf.ConfVars.HIVE_RCFILE_COLUMN_NUMBER_CONF.varname, columnNum); } /** @@ -67,7 +68,7 @@ public static void setColumnNumber(Configuration conf, int columnNum) { * @return number of columns for RCFile's writer */ public static int getColumnNumber(Configuration conf) { - return conf.getInt(RCFile.COLUMN_NUMBER_CONF_STR, 0); + return HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_RCFILE_COLUMN_NUMBER_CONF); } /** {@inheritDoc} */ diff --git ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileMergeMapper.java ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileMergeMapper.java index ffd7597..066afa3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileMergeMapper.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileMergeMapper.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.RCFile; +import org.apache.hadoop.hive.ql.io.RCFileOutputFormat; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; @@ -150,7 +151,7 @@ public void map(Object k, RCFileValueBufferWrapper value, if (outWriter == null) { codec = key.codec; columnNumber = key.keyBuffer.getColumnNumber(); - jc.setInt(RCFile.COLUMN_NUMBER_CONF_STR, columnNumber); + RCFileOutputFormat.setColumnNumber(jc, 
columnNumber); outWriter = new RCFile.Writer(fs, jc, outPath, null, codec); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java index 257f186..6d8694b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.RCFile; +import org.apache.hadoop.hive.ql.io.RCFileOutputFormat; import org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileKeyBufferWrapper; import org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileValueBufferWrapper; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -124,7 +125,7 @@ public void map(Object k, RCFileValueBufferWrapper value, if (outWriter == null) { codec = key.getCodec(); columnNumber = key.getKeyBuffer().getColumnNumber(); - jc.setInt(RCFile.COLUMN_NUMBER_CONF_STR, columnNumber); + RCFileOutputFormat.setColumnNumber(jc, columnNumber); outWriter = new RCFile.Writer(fs, jc, outPath, null, codec); } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index a988b44..4d35176 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -104,7 +104,6 @@ import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; import org.apache.hadoop.hive.shims.HadoopShims; -import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatus; import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.mapred.InputFormat; import org.apache.hadoop.util.StringUtils; diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java index 9b24bfd..e72ac8b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java @@ -116,6 +116,7 @@ public static int setVariable(String varname, String varvalue) throws IllegalArg if (varvalue.contains("\n")){ ss.err.println("Warning: Value had a \\n character in it."); } + varname = varname.trim(); if (varname.startsWith(SetProcessor.ENV_PREFIX)){ ss.err.println("env:* variables can not be set."); return 1; @@ -145,8 +146,8 @@ private static void setConf(String varname, String key, String varvalue, boolean if (!confVars.isType(value)) { StringBuilder message = new StringBuilder(); message.append("'SET ").append(varname).append('=').append(varvalue); - message.append("' FAILED because ").append(key).append(" expects an "); - message.append(confVars.typeString()).append(" value."); + message.append("' FAILED because ").append(key).append(" expects "); + message.append(confVars.typeString()).append(" type value."); throw new IllegalArgumentException(message.toString()); } String fail = confVars.validate(value); @@ -156,6 +157,8 @@ private static void setConf(String varname, String key, String varvalue, boolean message.append("' FAILED in validation : ").append(fail).append('.'); throw new IllegalArgumentException(message.toString()); } + } else if (key.startsWith("hive.")) { + throw new IllegalArgumentException("hive configuration " + key + " does not exist."); } } conf.verifyAndSet(key, value); } diff
--git ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java index 464bd5e..cff5ada 100644 --- ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java +++ ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java @@ -624,7 +624,7 @@ public void testSync() throws IOException { int writeCount = 2500; Configuration cloneConf = new Configuration(conf); RCFileOutputFormat.setColumnNumber(cloneConf, bytesArray.length); - cloneConf.setInt(RCFile.RECORD_INTERVAL_CONF_STR, intervalRecordCount); + cloneConf.setInt(HiveConf.ConfVars.HIVE_RCFILE_RECORD_INTERVAL.varname, intervalRecordCount); RCFile.Writer writer = new RCFile.Writer(fs, cloneConf, testFile, null, codec); BytesRefArrayWritable bytes = new BytesRefArrayWritable(bytesArray.length); @@ -690,7 +690,7 @@ private void writeThenReadByRecordReader(int intervalRecordCount, fs.delete(testFile, true); Configuration cloneConf = new Configuration(conf); RCFileOutputFormat.setColumnNumber(cloneConf, bytesArray.length); - cloneConf.setInt(RCFile.RECORD_INTERVAL_CONF_STR, intervalRecordCount); + cloneConf.setInt(HiveConf.ConfVars.HIVE_RCFILE_RECORD_INTERVAL.varname, intervalRecordCount); RCFile.Writer writer = new RCFile.Writer(fs, cloneConf, testFile, null, codec); diff --git ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q index 6612fe8..7f71305 100644 --- ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q +++ ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q @@ -1,10 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.txn.testing=true; create table T1(key string, val string) stored as textfile; -set hive.txn.testing=true; alter table T1 compact 'major'; alter table T1 compact 'minor'; diff --git ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q index 599cad9..4759d65 100644 --- ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q +++ ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q @@ -1,6 +1,5 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.txn.testing=true; create table T1(key string, val string) partitioned by (ds string) stored as textfile; diff --git ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q index 871d292..23b3959 100644 --- ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q +++ ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q @@ -1,6 +1,5 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.txn.testing=true; create database D1; diff --git ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q index 7c71fdd..30b26f4 100644 --- ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q +++ ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q @@ -1,6 +1,5 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.txn.testing=true; show locks; diff --git ql/src/test/queries/clientpositive/index_bitmap_compression.q ql/src/test/queries/clientpositive/index_bitmap_compression.q index 4e93275..91d04ec 100644 --- ql/src/test/queries/clientpositive/index_bitmap_compression.q +++ ql/src/test/queries/clientpositive/index_bitmap_compression.q @@ -1,5 +1,5 @@ set 
hive.stats.dbclass=fs; -SET hive.exec.compress.result=true; +SET hive.exec.compress.output=true; -- SORT_QUERY_RESULTS diff --git ql/src/test/queries/clientpositive/index_compression.q ql/src/test/queries/clientpositive/index_compression.q index 1bb29a5..0702a2b 100644 --- ql/src/test/queries/clientpositive/index_compression.q +++ ql/src/test/queries/clientpositive/index_compression.q @@ -1,4 +1,4 @@ -SET hive.exec.compress.result=true; +SET hive.exec.compress.output=true; SET hive.stats.dbclass=fs; -- SORT_QUERY_RESULTS diff --git ql/src/test/queries/clientpositive/join25.q ql/src/test/queries/clientpositive/join25.q index 75f542d..b093d69 100644 --- ql/src/test/queries/clientpositive/join25.q +++ ql/src/test/queries/clientpositive/join25.q @@ -1,5 +1,3 @@ -set hive.mapjoin.numrows = 2; - -- SORT_QUERY_RESULTS CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE; diff --git ql/src/test/queries/clientpositive/join36.q ql/src/test/queries/clientpositive/join36.q index dd99d44..ed71439 100644 --- ql/src/test/queries/clientpositive/join36.q +++ ql/src/test/queries/clientpositive/join36.q @@ -1,5 +1,3 @@ -set hive.mapjoin.numrows = 2; - -- SORT_QUERY_RESULTS CREATE TABLE tmp1(key INT, cnt INT); diff --git ql/src/test/queries/clientpositive/join37.q ql/src/test/queries/clientpositive/join37.q index dc57d3a..e029415 100644 --- ql/src/test/queries/clientpositive/join37.q +++ ql/src/test/queries/clientpositive/join37.q @@ -1,5 +1,3 @@ -set hive.mapjoin.numrows = 2; - -- SORT_QUERY_RESULTS CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE; diff --git ql/src/test/queries/clientpositive/join_nulls.q ql/src/test/queries/clientpositive/join_nulls.q index 6c8ad10..6a2a7df 100644 --- ql/src/test/queries/clientpositive/join_nulls.q +++ ql/src/test/queries/clientpositive/join_nulls.q @@ -49,8 +49,8 @@ LOAD DATA LOCAL INPATH '../../data/files/in2.txt' into table smb_input1; LOAD DATA LOCAL INPATH '../../data/files/in1.txt' into table smb_input2; LOAD DATA LOCAL INPATH '../../data/files/in2.txt' into table smb_input2; -SET hive.optimize.bucketmapJOIN = true; -SET hive.optimize.bucketmapJOIN.sortedmerge = true; +SET hive.optimize.bucketmapjoin = true; +SET hive.optimize.bucketmapjoin.sortedmerge = true; SET hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key = b.key; diff --git ql/src/test/queries/clientpositive/join_nullsafe.q ql/src/test/queries/clientpositive/join_nullsafe.q index 7c3d1e8..78a65d6 100644 --- ql/src/test/queries/clientpositive/join_nullsafe.q +++ ql/src/test/queries/clientpositive/join_nullsafe.q @@ -1,5 +1,3 @@ -set hive.nullsafe.equijoin=true; - -- SORT_QUERY_RESULTS CREATE TABLE myinput1(key int, value int); @@ -30,16 +28,23 @@ SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key<=>b.value; SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value; SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value; +CREATE TABLE smb_input(key int, value int); +LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input; +LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input; + +set hive.enforce.sorting = true; +set hive.enforce.bucketing = true; + -- smbs CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; -LOAD DATA LOCAL INPATH 
'../../data/files/in8.txt' into table smb_input1; -LOAD DATA LOCAL INPATH '../../data/files/in9.txt' into table smb_input1; -LOAD DATA LOCAL INPATH '../../data/files/in8.txt' into table smb_input2; -LOAD DATA LOCAL INPATH '../../data/files/in9.txt' into table smb_input2; -SET hive.optimize.bucketmapJOIN = true; -SET hive.optimize.bucketmapJOIN.sortedmerge = true; +from smb_input +insert overwrite table smb_input1 select * +insert overwrite table smb_input2 select *; + +SET hive.optimize.bucketmapjoin = true; +SET hive.optimize.bucketmapjoin.sortedmerge = true; SET hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key; diff --git ql/src/test/queries/clientpositive/metadata_export_drop.q ql/src/test/queries/clientpositive/metadata_export_drop.q index e2da61a..2abc4cc 100644 --- ql/src/test/queries/clientpositive/metadata_export_drop.q +++ ql/src/test/queries/clientpositive/metadata_export_drop.q @@ -2,7 +2,7 @@ create table tmp_meta_export_listener_drop_test (foo string); dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/data/exports/HIVE-3427; set hive.metastore.pre.event.listeners=org.apache.hadoop.hive.ql.parse.MetaDataExportListener; set hive.metadata.export.location=${system:test.tmp.dir}/data/exports/HIVE-3427; -set hive.move.exported.metadata.to.trash=false; +set hive.metadata.move.exported.metadata.to.trash=false; drop table tmp_meta_export_listener_drop_test; dfs -rmr ${system:test.tmp.dir}/data/exports/HIVE-3427; set hive.metastore.pre.event.listeners=; diff --git ql/src/test/queries/clientpositive/overridden_confs.q ql/src/test/queries/clientpositive/overridden_confs.q index 9dcaed6..c7e0551 100644 --- ql/src/test/queries/clientpositive/overridden_confs.q +++ ql/src/test/queries/clientpositive/overridden_confs.q @@ -1,4 +1,4 @@ set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyOverriddenConfigsHook; -set hive.config.doesnt.exit=abc; +set some.hive.config.doesnt.exit=abc; select count(*) from src; diff --git ql/src/test/queries/clientpositive/quotedid_skew.q ql/src/test/queries/clientpositive/quotedid_skew.q index 5c95967..7088ea1 100644 --- ql/src/test/queries/clientpositive/quotedid_skew.q +++ ql/src/test/queries/clientpositive/quotedid_skew.q @@ -2,7 +2,6 @@ set hive.support.quoted.identifiers=column; set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE T1(`!@#$%^&*()_q` string, `y&y` string) diff --git ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q index fc07742..164dca3 100644 --- ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q +++ ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q @@ -1,4 +1,3 @@ -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; set hive.mapred.supports.subdirectories=true; diff --git ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q index 50cfc61..9a09849 100644 --- ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q +++ ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q @@ -1,4 +1,3 @@ -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; set hive.mapred.supports.subdirectories=true; diff --git ql/src/test/queries/clientpositive/skewjoinopt1.q 
ql/src/test/queries/clientpositive/skewjoinopt1.q index 504ba8b..1f72a80 100644 --- ql/src/test/queries/clientpositive/skewjoinopt1.q +++ ql/src/test/queries/clientpositive/skewjoinopt1.q @@ -1,5 +1,4 @@ set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE T1(key STRING, val STRING) diff --git ql/src/test/queries/clientpositive/skewjoinopt10.q ql/src/test/queries/clientpositive/skewjoinopt10.q index f35af90..eba7f9a 100644 --- ql/src/test/queries/clientpositive/skewjoinopt10.q +++ ql/src/test/queries/clientpositive/skewjoinopt10.q @@ -1,5 +1,4 @@ set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE T1(key STRING, value STRING) STORED AS TEXTFILE; diff --git ql/src/test/queries/clientpositive/skewjoinopt11.q ql/src/test/queries/clientpositive/skewjoinopt11.q index 9e00bdc..6e54202 100644 --- ql/src/test/queries/clientpositive/skewjoinopt11.q +++ ql/src/test/queries/clientpositive/skewjoinopt11.q @@ -1,5 +1,4 @@ set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE T1(key STRING, val STRING) diff --git ql/src/test/queries/clientpositive/skewjoinopt12.q ql/src/test/queries/clientpositive/skewjoinopt12.q index 1719950..edf19aa 100644 --- ql/src/test/queries/clientpositive/skewjoinopt12.q +++ ql/src/test/queries/clientpositive/skewjoinopt12.q @@ -1,5 +1,4 @@ set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE T1(key STRING, val STRING) diff --git ql/src/test/queries/clientpositive/skewjoinopt13.q ql/src/test/queries/clientpositive/skewjoinopt13.q index 5ef217c..9511975 100644 --- ql/src/test/queries/clientpositive/skewjoinopt13.q +++ ql/src/test/queries/clientpositive/skewjoinopt13.q @@ -1,5 +1,4 @@ set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE; diff --git ql/src/test/queries/clientpositive/skewjoinopt14.q ql/src/test/queries/clientpositive/skewjoinopt14.q index df1a26b..1bb01b0 100644 --- ql/src/test/queries/clientpositive/skewjoinopt14.q +++ ql/src/test/queries/clientpositive/skewjoinopt14.q @@ -1,5 +1,4 @@ set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE T1(key STRING, val STRING) diff --git ql/src/test/queries/clientpositive/skewjoinopt15.q ql/src/test/queries/clientpositive/skewjoinopt15.q index 1db5472..8aeabf5 100644 --- ql/src/test/queries/clientpositive/skewjoinopt15.q +++ ql/src/test/queries/clientpositive/skewjoinopt15.q @@ -1,5 +1,4 @@ set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE tmpT1(key STRING, val STRING) STORED AS TEXTFILE; diff --git ql/src/test/queries/clientpositive/skewjoinopt16.q ql/src/test/queries/clientpositive/skewjoinopt16.q index 915de61..18dc87f 100644 --- ql/src/test/queries/clientpositive/skewjoinopt16.q +++ ql/src/test/queries/clientpositive/skewjoinopt16.q @@ -1,5 +1,4 @@ set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set 
hive.optimize.skewjoin.compiletime = true; CREATE TABLE T1(key STRING, val STRING) diff --git ql/src/test/queries/clientpositive/skewjoinopt17.q ql/src/test/queries/clientpositive/skewjoinopt17.q index 2ee79cc..a2f5c8a 100644 --- ql/src/test/queries/clientpositive/skewjoinopt17.q +++ ql/src/test/queries/clientpositive/skewjoinopt17.q @@ -1,5 +1,4 @@ set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE T1(key STRING, val STRING) diff --git ql/src/test/queries/clientpositive/skewjoinopt18.q ql/src/test/queries/clientpositive/skewjoinopt18.q index 9d06cc0..4999aaf 100644 --- ql/src/test/queries/clientpositive/skewjoinopt18.q +++ ql/src/test/queries/clientpositive/skewjoinopt18.q @@ -1,5 +1,4 @@ set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE tmpT1(key STRING, val STRING) STORED AS TEXTFILE; diff --git ql/src/test/queries/clientpositive/skewjoinopt19.q ql/src/test/queries/clientpositive/skewjoinopt19.q index 075645f..94f4551 100644 --- ql/src/test/queries/clientpositive/skewjoinopt19.q +++ ql/src/test/queries/clientpositive/skewjoinopt19.q @@ -1,5 +1,4 @@ set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE T1(key STRING, val STRING) diff --git ql/src/test/queries/clientpositive/skewjoinopt2.q ql/src/test/queries/clientpositive/skewjoinopt2.q index f7acaad..74c855c 100644 --- ql/src/test/queries/clientpositive/skewjoinopt2.q +++ ql/src/test/queries/clientpositive/skewjoinopt2.q @@ -1,5 +1,4 @@ set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE T1(key STRING, val STRING) diff --git ql/src/test/queries/clientpositive/skewjoinopt20.q ql/src/test/queries/clientpositive/skewjoinopt20.q index 9b908ce..cc21d7b 100644 --- ql/src/test/queries/clientpositive/skewjoinopt20.q +++ ql/src/test/queries/clientpositive/skewjoinopt20.q @@ -1,5 +1,4 @@ set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE T1(key STRING, val STRING) diff --git ql/src/test/queries/clientpositive/skewjoinopt3.q ql/src/test/queries/clientpositive/skewjoinopt3.q index 22ea4f0..889f289 100644 --- ql/src/test/queries/clientpositive/skewjoinopt3.q +++ ql/src/test/queries/clientpositive/skewjoinopt3.q @@ -1,5 +1,4 @@ set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE T1(key STRING, val STRING) diff --git ql/src/test/queries/clientpositive/skewjoinopt4.q ql/src/test/queries/clientpositive/skewjoinopt4.q index 8496b1a..7e1303e 100644 --- ql/src/test/queries/clientpositive/skewjoinopt4.q +++ ql/src/test/queries/clientpositive/skewjoinopt4.q @@ -1,5 +1,4 @@ set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE T1(key STRING, val STRING) diff --git ql/src/test/queries/clientpositive/skewjoinopt5.q ql/src/test/queries/clientpositive/skewjoinopt5.q index 152de5b..75d8e92 100644 --- ql/src/test/queries/clientpositive/skewjoinopt5.q +++ ql/src/test/queries/clientpositive/skewjoinopt5.q @@ -1,5 +1,4 @@ set 
hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE T1(key STRING, val STRING) diff --git ql/src/test/queries/clientpositive/skewjoinopt6.q ql/src/test/queries/clientpositive/skewjoinopt6.q index 2e261bd..5a7580e 100644 --- ql/src/test/queries/clientpositive/skewjoinopt6.q +++ ql/src/test/queries/clientpositive/skewjoinopt6.q @@ -1,5 +1,4 @@ set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE T1(key STRING, val STRING) diff --git ql/src/test/queries/clientpositive/skewjoinopt7.q ql/src/test/queries/clientpositive/skewjoinopt7.q index e4d9605..b9a4c49 100644 --- ql/src/test/queries/clientpositive/skewjoinopt7.q +++ ql/src/test/queries/clientpositive/skewjoinopt7.q @@ -1,5 +1,4 @@ set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE T1(key STRING, val STRING) diff --git ql/src/test/queries/clientpositive/skewjoinopt8.q ql/src/test/queries/clientpositive/skewjoinopt8.q index 85746d9..e8d98ad 100644 --- ql/src/test/queries/clientpositive/skewjoinopt8.q +++ ql/src/test/queries/clientpositive/skewjoinopt8.q @@ -1,5 +1,4 @@ set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE; diff --git ql/src/test/queries/clientpositive/skewjoinopt9.q ql/src/test/queries/clientpositive/skewjoinopt9.q index 889ab6c..b5d06a3 100644 --- ql/src/test/queries/clientpositive/skewjoinopt9.q +++ ql/src/test/queries/clientpositive/skewjoinopt9.q @@ -1,5 +1,4 @@ set hive.mapred.supports.subdirectories=true; -set hive.internal.ddl.list.bucketing.enable=true; set hive.optimize.skewjoin.compiletime = true; CREATE TABLE T1(key STRING, val STRING) diff --git ql/src/test/queries/clientpositive/smb_mapjoin_25.q ql/src/test/queries/clientpositive/smb_mapjoin_25.q index e43174b..683341b 100644 --- ql/src/test/queries/clientpositive/smb_mapjoin_25.q +++ ql/src/test/queries/clientpositive/smb_mapjoin_25.q @@ -22,11 +22,9 @@ set hive.optimize.bucketmapjoin.sortedmerge=true; set hive.mapred.reduce.tasks.speculative.execution=false; set hive.auto.convert.join=true; set hive.auto.convert.sortmerge.join=true; -set hive.auto.convert.sortmerge.join.noconditionaltask=true; set hive.auto.convert.join.noconditionaltask=true; set hive.auto.convert.join.noconditionaltask.size=10000000000; set hive.optimize.reducededuplication.min.reducer=1; -set hive.optimize.mapjoin.mapreduce=true; set hive.auto.convert.sortmerge.join.bigtable.selection.policy=org.apache.hadoop.hive.ql.optimizer.LeftmostBigTableSelectorForAutoSMJ; -- explain diff --git ql/src/test/queries/clientpositive/stats15.q ql/src/test/queries/clientpositive/stats15.q index 9a557c6..7dffa9c 100644 --- ql/src/test/queries/clientpositive/stats15.q +++ ql/src/test/queries/clientpositive/stats15.q @@ -1,5 +1,5 @@ set datanucleus.cache.collections=false; -set hive.stats.collect.uncompressedsize=false; +set hive.stats.collect.rawdatasize=false; create table stats_src like src; insert overwrite table stats_src select * from src; diff --git ql/src/test/queries/clientpositive/truncate_table.q ql/src/test/queries/clientpositive/truncate_table.q index 975c0f1..769b10f 100644 --- ql/src/test/queries/clientpositive/truncate_table.q +++ 
ql/src/test/queries/clientpositive/truncate_table.q @@ -12,7 +12,7 @@ load data local inpath '../../data/files/kv1.txt' into table srcpart_truncate pa load data local inpath '../../data/files/kv1.txt' into table srcpart_truncate partition (ds='2008-04-09', hr='11'); load data local inpath '../../data/files/kv1.txt' into table srcpart_truncate partition (ds='2008-04-09', hr='12'); -set hive.fetch.task.convertion=more; +set hive.fetch.task.conversion=more; -- truncate non-partitioned table explain TRUNCATE TABLE src_truncate; diff --git ql/src/test/queries/clientpositive/udtf_explode.q ql/src/test/queries/clientpositive/udtf_explode.q index 1d405b3..50f5f5e 100644 --- ql/src/test/queries/clientpositive/udtf_explode.q +++ ql/src/test/queries/clientpositive/udtf_explode.q @@ -22,6 +22,7 @@ SELECT src.key, myKey, myVal FROM src lateral view explode(map(1,'one',2,'two',3 -- HIVE-4295 SELECT BLOCK__OFFSET__INSIDE__FILE, src.key, myKey, myVal FROM src lateral view explode(map(1,'one',2,'two',3,'three')) x AS myKey,myVal LIMIT 3; -set hive.optimize.cp=false; -SELECT src.key, myKey, myVal FROM src lateral view explode(map(1,'one',2,'two',3,'three')) x AS myKey,myVal LIMIT 3; -SELECT BLOCK__OFFSET__INSIDE__FILE, src.key, myKey, myVal FROM src lateral view explode(map(1,'one',2,'two',3,'three')) x AS myKey,myVal LIMIT 3; +-- the hive.optimize.cp knob has been removed; these queries are kept commented out for reference +-- set hive.optimize.cp=false; +-- SELECT src.key, myKey, myVal FROM src lateral view explode(map(1,'one',2,'two',3,'three')) x AS myKey,myVal LIMIT 3; +-- SELECT BLOCK__OFFSET__INSIDE__FILE, src.key, myKey, myVal FROM src lateral view explode(map(1,'one',2,'two',3,'three')) x AS myKey,myVal LIMIT 3; \ No newline at end of file diff --git ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q index d8b3d1a..54c906e 100644 --- ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q +++ ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q @@ -5,8 +5,8 @@ CREATE TABLE decimal_mapjoin STORED AS ORC AS FROM alltypesorc; SET hive.auto.convert.join=true; -SET hive.auto.convert.join.nonconditionaltask=true; -SET hive.auto.convert.join.nonconditionaltask.size=1000000000; +SET hive.auto.convert.join.noconditionaltask=true; +SET hive.auto.convert.join.noconditionaltask.size=1000000000; SET hive.vectorized.execution.enabled=true; EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 diff --git ql/src/test/queries/clientpositive/vectorized_bucketmapjoin1.q ql/src/test/queries/clientpositive/vectorized_bucketmapjoin1.q index e309713..ef1b816 100644 --- ql/src/test/queries/clientpositive/vectorized_bucketmapjoin1.q +++ ql/src/test/queries/clientpositive/vectorized_bucketmapjoin1.q @@ -25,7 +25,6 @@ insert into table vsmb_bucket_TXT select cint, cstring1 from alltypesorc limit 2 set hive.vectorized.execution.enabled=true; set hive.optimize.bucketmapjoin = true; set hive.optimize.bucketmapjoin.sortedmerge = true; -set hive.auto.convert.sortmerge.join.noconditionaltask = true; set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; explain diff --git ql/src/test/queries/clientpositive/vectorized_mapjoin.q ql/src/test/queries/clientpositive/vectorized_mapjoin.q index f390c2c..e5e15ab 100644 --- ql/src/test/queries/clientpositive/vectorized_mapjoin.q +++ ql/src/test/queries/clientpositive/vectorized_mapjoin.q @@ -1,7 +1,7 @@ SET hive.vectorized.execution.enabled=true; SET hive.auto.convert.join=true; -SET hive.auto.convert.join.nonconditionaltask=true; -SET
hive.auto.convert.join.nonconditionaltask.size=1000000000; +SET hive.auto.convert.join.noconditionaltask=true; +SET hive.auto.convert.join.noconditionaltask.size=1000000000; EXPLAIN SELECT COUNT(t1.cint), MAX(t2.cint), MIN(t1.cint), AVG(t1.cint+t2.cint) FROM alltypesorc t1 diff --git ql/src/test/queries/clientpositive/vectorized_nested_mapjoin.q ql/src/test/queries/clientpositive/vectorized_nested_mapjoin.q index ce4227c..e7e4b33 100644 --- ql/src/test/queries/clientpositive/vectorized_nested_mapjoin.q +++ ql/src/test/queries/clientpositive/vectorized_nested_mapjoin.q @@ -1,7 +1,7 @@ SET hive.vectorized.execution.enabled=true; SET hive.auto.convert.join=true; -SET hive.auto.convert.join.nonconditionaltask=true; -SET hive.auto.convert.join.nonconditionaltask.size=1000000000; +SET hive.auto.convert.join.noconditionaltask=true; +SET hive.auto.convert.join.noconditionaltask.size=1000000000; explain select sum(t1.td) from (select v1.csmallint as tsi, v1.cdouble as td from alltypesorc v1, alltypesorc v2 where v1.ctinyint=v2.ctinyint) t1 join alltypesorc v3 on t1.tsi=v3.csmallint; diff --git ql/src/test/results/clientnegative/set_hiveconf_validation0.q.out ql/src/test/results/clientnegative/set_hiveconf_validation0.q.out index d309697..b792a67 100644 --- ql/src/test/results/clientnegative/set_hiveconf_validation0.q.out +++ ql/src/test/results/clientnegative/set_hiveconf_validation0.q.out @@ -8,4 +8,4 @@ POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@src key string default value string default -Query returned non-zero code: 1, cause: 'SET hive.join.cache.size=test' FAILED because hive.join.cache.size expects an INT value. +Query returned non-zero code: 1, cause: 'SET hive.join.cache.size=test' FAILED because hive.join.cache.size expects INT type value. diff --git ql/src/test/results/clientnegative/set_hiveconf_validation1.q.out ql/src/test/results/clientnegative/set_hiveconf_validation1.q.out index 9f24b8e..b55ad50 100644 --- ql/src/test/results/clientnegative/set_hiveconf_validation1.q.out +++ ql/src/test/results/clientnegative/set_hiveconf_validation1.q.out @@ -8,4 +8,4 @@ POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@src key string default value string default -Query returned non-zero code: 1, cause: 'SET hive.map.aggr.hash.min.reduction=false' FAILED because hive.map.aggr.hash.min.reduction expects an FLOAT value. +Query returned non-zero code: 1, cause: 'SET hive.map.aggr.hash.min.reduction=false' FAILED because hive.map.aggr.hash.min.reduction expects FLOAT type value. 
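A note on the two validation .q.out changes just above: together with the HiveConf.initialize() and SetProcessor.setConf() hunks earlier in this patch, they define the new behavior under hive.conf.validation (on by default): a mis-typed value for a known variable is rejected with the new "expects ... type value." wording, and a SET of an unrecognized hive.* key now fails outright, while unknown keys read from configuration files only draw a warning. The following sketch of the checks involved is not part of the patch; the class name and the bogus key are invented for illustration, but the ConfVars calls are the same ones the patch itself uses.

    import org.apache.hadoop.hive.conf.HiveConf;

    public class ConfValidationSketch {
      public static void main(String[] args) {
        // Known variable, wrong type: isType() fails, so SetProcessor raises
        // "'SET hive.join.cache.size=test' FAILED because hive.join.cache.size
        // expects INT type value." (see set_hiveconf_validation0.q.out above).
        HiveConf.ConfVars cacheSize = HiveConf.getConfVars("hive.join.cache.size");
        System.out.println(cacheSize.isType("test"));   // false
        System.out.println(cacheSize.typeString());     // INT

        // Unknown hive.* key: getConfVars() returns null, so SetProcessor throws
        // "hive configuration ... does not exist." at SET time, while
        // HiveConf.initialize() merely logs "HiveConf of name ... does not exist".
        System.out.println(HiveConf.getConfVars("hive.bogus.key.demo"));  // null
      }
    }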
diff --git ql/src/test/results/clientpositive/index_bitmap_compression.q.out ql/src/test/results/clientpositive/index_bitmap_compression.q.out
index 1f31e60..1217215 100644
--- ql/src/test/results/clientpositive/index_bitmap_compression.q.out
+++ ql/src/test/results/clientpositive/index_bitmap_compression.q.out
@@ -90,7 +90,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 3 Data size: 601 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
- compressed: false
+ compressed: true
 Statistics: Num rows: 3 Data size: 601 Basic stats: COMPLETE Column stats: NONE
 table:
 input format: org.apache.hadoop.mapred.TextInputFormat
diff --git ql/src/test/results/clientpositive/index_compression.q.out ql/src/test/results/clientpositive/index_compression.q.out
index 2ccabce..ab0a3a6 100644
--- ql/src/test/results/clientpositive/index_compression.q.out
+++ ql/src/test/results/clientpositive/index_compression.q.out
@@ -84,7 +84,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 3 Data size: 601 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
- compressed: false
+ compressed: true
 Statistics: Num rows: 3 Data size: 601 Basic stats: COMPLETE Column stats: NONE
 table:
 input format: org.apache.hadoop.mapred.TextInputFormat
diff --git ql/src/test/results/clientpositive/join_nullsafe.q.out ql/src/test/results/clientpositive/join_nullsafe.q.out
index a729af7..37b6978 100644
--- ql/src/test/results/clientpositive/join_nullsafe.q.out
+++ ql/src/test/results/clientpositive/join_nullsafe.q.out
@@ -555,6 +555,29 @@ NULL 35 NULL NULL
 NULL NULL 10 NULL
 NULL NULL 48 NULL
 NULL NULL NULL NULL
+PREHOOK: query: CREATE TABLE smb_input(key int, value int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: CREATE TABLE smb_input(key int, value int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@smb_input
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@smb_input
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@smb_input
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@smb_input
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@smb_input
 PREHOOK: query: -- smbs
 CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
@@ -571,38 +594,24 @@ POSTHOOK: query: CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@smb_input2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in8.txt' into table smb_input1
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@smb_input1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in8.txt' into table smb_input1
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@smb_input1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in9.txt' into table smb_input1
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
+PREHOOK: query: from smb_input
+insert overwrite table smb_input1 select *
+insert overwrite table smb_input2 select *
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_input
 PREHOOK: Output: default@smb_input1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in9.txt' into table smb_input1
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@smb_input1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in8.txt' into table smb_input2
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@smb_input2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in8.txt' into table smb_input2
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@smb_input2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in9.txt' into table smb_input2
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
 PREHOOK: Output: default@smb_input2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in9.txt' into table smb_input2
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
+POSTHOOK: query: from smb_input
+insert overwrite table smb_input1 select *
+insert overwrite table smb_input2 select *
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_input
+POSTHOOK: Output: default@smb_input1
 POSTHOOK: Output: default@smb_input2
+POSTHOOK: Lineage: smb_input1.key SIMPLE [(smb_input)smb_input.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_input1.value SIMPLE [(smb_input)smb_input.FieldSchema(name:value, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_input2.key SIMPLE [(smb_input)smb_input.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_input2.value SIMPLE [(smb_input)smb_input.FieldSchema(name:value, type:int, comment:null), ]
 PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@smb_input1
@@ -611,48 +620,60 @@ POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_input1
 #### A masked pattern was here ####
-10 NULL 10 NULL
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 1000
+10 100 10 1000
+10 1000 10 100
+10 1000 10 100
+10 1000 10 1000
 100 100 100 100
-110 NULL 110 NULL
-148 NULL 148 NULL
-200 200 200 200
-48 NULL 48 NULL
-NULL 10 NULL 10
-NULL 10 NULL 110
-NULL 10 NULL 135
-NULL 10 NULL 35
-NULL 10 NULL NULL
-NULL 10 NULL NULL
-NULL 110 NULL 10
-NULL 110 NULL 110
-NULL 110 NULL 135
-NULL 110 NULL 35
-NULL 110 NULL NULL
-NULL 110 NULL NULL
-NULL 135 NULL 10
-NULL 135 NULL 110
-NULL 135 NULL 135
-NULL 135 NULL 35
-NULL 135 NULL NULL
-NULL 135 NULL NULL
-NULL 35 NULL 10
-NULL 35 NULL 110
-NULL 35 NULL 135
+12 100 12 100
+12 100 12 NULL
+12 NULL 12 100
+12 NULL 12 NULL
+15 10015 15 10015
+20 10020 20 10020
+25 10025 25 10025
+30 10030 30 10030
+35 10035 35 10035
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+5 10005 5 10005
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+NULL 10050 NULL 10050
+NULL 10050 NULL 35
+NULL 10050 NULL NULL
+NULL 35 NULL 10050
 NULL 35 NULL 35
 NULL 35 NULL NULL
-NULL 35 NULL NULL
-NULL NULL NULL 10
-NULL NULL NULL 10
-NULL NULL NULL 110
-NULL NULL NULL 110
-NULL NULL NULL 135
-NULL NULL NULL 135
-NULL NULL NULL 35
+NULL NULL NULL 10050
 NULL NULL NULL 35
 NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
 PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key AND a.value <=> b.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@smb_input1
@@ -661,20 +682,48 @@ POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_input1
 #### A masked pattern was here ####
-10 NULL 10 NULL
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 1000 10 1000
 100 100 100 100
-110 NULL 110 NULL
-148 NULL 148 NULL
-200 200 200 200
-48 NULL 48 NULL
-NULL 10 NULL 10
-NULL 110 NULL 110
-NULL 135 NULL 135
+12 100 12 100
+12 NULL 12 NULL
+15 10015 15 10015
+20 10020 20 10020
+25 10025 25 10025
+30 10030 30 10030
+35 10035 35 10035
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+5 10005 5 10005
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+NULL 10050 NULL 10050
 NULL 35 NULL 35
 NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
 PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input1 b ON a.key <=> b.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@smb_input1
@@ -683,48 +732,60 @@ POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN s
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_input1
 #### A masked pattern was here ####
-10 NULL 10 NULL
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 1000
+10 100 10 1000
+10 1000 10 100
+10 1000 10 100
+10 1000 10 1000
 100 100 100 100
-110 NULL 110 NULL
-148 NULL 148 NULL
-200 200 200 200
-48 NULL 48 NULL
-NULL 10 NULL 10
-NULL 10 NULL 110
-NULL 10 NULL 135
-NULL 10 NULL 35
-NULL 10 NULL NULL
-NULL 10 NULL NULL
-NULL 110 NULL 10
-NULL 110 NULL 110
-NULL 110 NULL 135
-NULL 110 NULL 35
-NULL 110 NULL NULL
-NULL 110 NULL NULL
-NULL 135 NULL 10
-NULL 135 NULL 110
-NULL 135 NULL 135
-NULL 135 NULL 35
-NULL 135 NULL NULL
-NULL 135 NULL NULL
-NULL 35 NULL 10
-NULL 35 NULL 110
-NULL 35 NULL 135
+12 100 12 100
+12 100 12 NULL
+12 NULL 12 100
+12 NULL 12 NULL
+15 10015 15 10015
+20 10020 20 10020
+25 10025 25 10025
+30 10030 30 10030
+35 10035 35 10035
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+5 10005 5 10005
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+NULL 10050 NULL 10050
+NULL 10050 NULL 35
+NULL 10050 NULL NULL
+NULL 35 NULL 10050
 NULL 35 NULL 35
 NULL 35 NULL NULL
-NULL 35 NULL NULL
-NULL NULL NULL 10
-NULL NULL NULL 10
-NULL NULL NULL 110
-NULL NULL NULL 110
-NULL NULL NULL 135
-NULL NULL NULL 135
-NULL NULL NULL 35
+NULL NULL NULL 10050
 NULL NULL NULL 35
 NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
 PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@smb_input1
@@ -733,47 +794,59 @@ POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input1 b
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_input1
 #### A masked pattern was here ####
-10 NULL 10 NULL
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 1000
+10 100 10 1000
+10 1000 10 100
+10 1000 10 100
+10 1000 10 1000
 100 100 100 100
-110 NULL 110 NULL
-148 NULL 148 NULL
-200 200 200 200
-48 NULL 48 NULL
-NULL 10 NULL 10
-NULL 10 NULL 110
-NULL 10 NULL 135
-NULL 10 NULL 35
-NULL 10 NULL NULL
-NULL 10 NULL NULL
-NULL 110 NULL 10
-NULL 110 NULL 110
-NULL 110 NULL 135
-NULL 110 NULL 35
-NULL 110 NULL NULL
-NULL 110 NULL NULL
-NULL 135 NULL 10
-NULL 135 NULL 110
-NULL 135 NULL 135
-NULL 135 NULL 35
-NULL 135 NULL NULL
-NULL 135 NULL NULL
-NULL 35 NULL 10
-NULL 35 NULL 110
-NULL 35 NULL 135
+12 100 12 100
+12 100 12 NULL
+12 NULL 12 100
+12 NULL 12 NULL
+15 10015 15 10015
+20 10020 20 10020
+25 10025 25 10025
+30 10030 30 10030
+35 10035 35 10035
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+5 10005 5 10005
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+NULL 10050 NULL 10050
+NULL 10050 NULL 35
+NULL 10050 NULL NULL
+NULL 35 NULL 10050
 NULL 35 NULL 35
 NULL 35 NULL NULL
-NULL 35 NULL NULL
-NULL NULL NULL 10
-NULL NULL NULL 10
-NULL NULL NULL 110
-NULL NULL NULL 110
-NULL NULL NULL 135
-NULL NULL NULL 135
+NULL NULL NULL 10050
 NULL NULL NULL 35
-NULL NULL NULL 35
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
 NULL NULL NULL NULL
 PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input1 b ON a.key <=> b.key
 PREHOOK: type: QUERY
@@ -783,47 +856,59 @@ POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN sm
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_input1
 #### A masked pattern was here ####
-10 NULL 10 NULL
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 1000
+10 100 10 1000
+10 1000 10 100
+10 1000 10 100
+10 1000 10 1000
 100 100 100 100
-110 NULL 110 NULL
-148 NULL 148 NULL
-200 200 200 200
-48 NULL 48 NULL
-NULL 10 NULL 10
-NULL 10 NULL 110
-NULL 10 NULL 135
-NULL 10 NULL 35
-NULL 10 NULL NULL
-NULL 10 NULL NULL
-NULL 110 NULL 10
-NULL 110 NULL 110
-NULL 110 NULL 135
-NULL 110 NULL 35
-NULL 110 NULL NULL
-NULL 110 NULL NULL
-NULL 135 NULL 10
-NULL 135 NULL 110
-NULL 135 NULL 135
-NULL 135 NULL 35
-NULL 135 NULL NULL
-NULL 135 NULL NULL
-NULL 35 NULL 10
-NULL 35 NULL 110
-NULL 35 NULL 135
+12 100 12 100
+12 100 12 NULL
+12 NULL 12 100
+12 NULL 12 NULL
+15 10015 15 10015
+20 10020 20 10020
+25 10025 25 10025
+30 10030 30 10030
+35 10035 35 10035
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+5 10005 5 10005
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+NULL 10050 NULL 10050
+NULL 10050 NULL 35
+NULL 10050 NULL NULL
+NULL 35 NULL 10050
 NULL 35 NULL 35
 NULL 35 NULL NULL
-NULL 35 NULL NULL
-NULL NULL NULL 10
-NULL NULL NULL 10
-NULL NULL NULL 110
-NULL NULL NULL 110
-NULL NULL NULL 135
-NULL NULL NULL 135
+NULL NULL NULL 10050
 NULL NULL NULL 35
-NULL NULL NULL 35
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
 NULL NULL NULL NULL
 PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input2 b ON a.key <=> b.value
 PREHOOK: type: QUERY
@@ -835,45 +920,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_input1
 POSTHOOK: Input: default@smb_input2
 #### A masked pattern was here ####
-10 NULL NULL 10
+100 100 10 100
+100 100 10 100
 100 100 100 100
-110 NULL NULL 110
-200 200 200 200
-NULL 10 10 NULL
-NULL 10 110 NULL
-NULL 10 148 NULL
-NULL 10 48 NULL
-NULL 10 NULL NULL
-NULL 10 NULL NULL
-NULL 110 10 NULL
-NULL 110 110 NULL
-NULL 110 148 NULL
-NULL 110 48 NULL
-NULL 110 NULL NULL
-NULL 110 NULL NULL
-NULL 135 10 NULL
-NULL 135 110 NULL
-NULL 135 148 NULL
-NULL 135 48 NULL
-NULL 135 NULL NULL
-NULL 135 NULL NULL
-NULL 35 10 NULL
-NULL 35 110 NULL
-NULL 35 148 NULL
-NULL 35 48 NULL
-NULL 35 NULL NULL
+100 100 12 100
+35 10035 NULL 35
+NULL 10050 12 NULL
+NULL 10050 NULL NULL
+NULL 35 12 NULL
 NULL 35 NULL NULL
-NULL NULL 10 NULL
-NULL NULL 10 NULL
-NULL NULL 110 NULL
-NULL NULL 110 NULL
-NULL NULL 148 NULL
-NULL NULL 148 NULL
-NULL NULL 48 NULL
-NULL NULL 48 NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
+NULL NULL 12 NULL
 NULL NULL NULL NULL
 PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input2 b ON a.key <=> b.value
 PREHOOK: type: QUERY
@@ -885,45 +941,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_input1
 POSTHOOK: Input: default@smb_input2
 #### A masked pattern was here ####
-10 NULL NULL 10
+100 100 10 100
+100 100 10 100
 100 100 100 100
-110 NULL NULL 110
-200 200 200 200
-NULL 10 10 NULL
-NULL 10 110 NULL
-NULL 10 148 NULL
-NULL 10 48 NULL
-NULL 10 NULL NULL
-NULL 10 NULL NULL
-NULL 110 10 NULL
-NULL 110 110 NULL
-NULL 110 148 NULL
-NULL 110 48 NULL
-NULL 110 NULL NULL
-NULL 110 NULL NULL
-NULL 135 10 NULL
-NULL 135 110 NULL
-NULL 135 148 NULL
-NULL 135 48 NULL
-NULL 135 NULL NULL
-NULL 135 NULL NULL
-NULL 35 10 NULL
-NULL 35 110 NULL
-NULL 35 148 NULL
-NULL 35 48 NULL
+100 100 12 100
+35 10035 NULL 35
+NULL 10050 12 NULL
+NULL 10050 NULL NULL
+NULL 35 12 NULL
 NULL 35 NULL NULL
-NULL 35 NULL NULL
-NULL NULL 10 NULL
-NULL NULL 10 NULL
-NULL NULL 110 NULL
-NULL NULL 110 NULL
-NULL NULL 148 NULL
-NULL NULL 148 NULL
-NULL NULL 48 NULL
-NULL NULL 48 NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
+NULL NULL 12 NULL
 NULL NULL NULL NULL
 PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input2 b ON a.key <=> b.value
 PREHOOK: type: QUERY
@@ -935,47 +962,37 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_input1
 POSTHOOK: Input: default@smb_input2
 #### A masked pattern was here ####
-10 NULL NULL 10
+10 100 NULL NULL
+10 100 NULL NULL
+10 1000 NULL NULL
+100 100 10 100
+100 100 10 100
 100 100 100 100
-110 NULL NULL 110
-148 NULL NULL NULL
-200 200 200 200
-48 NULL NULL NULL
-NULL 10 10 NULL
-NULL 10 110 NULL
-NULL 10 148 NULL
-NULL 10 48 NULL
-NULL 10 NULL NULL
-NULL 10 NULL NULL
-NULL 110 10 NULL
-NULL 110 110 NULL
-NULL 110 148 NULL
-NULL 110 48 NULL
-NULL 110 NULL NULL
-NULL 110 NULL NULL
-NULL 135 10 NULL
-NULL 135 110 NULL
-NULL 135 148 NULL
-NULL 135 48 NULL
-NULL 135 NULL NULL
-NULL 135 NULL NULL
-NULL 35 10 NULL
-NULL 35 110 NULL
-NULL 35 148 NULL
-NULL 35 48 NULL
-NULL 35 NULL NULL
+100 100 12 100
+12 100 NULL NULL
+12 NULL NULL NULL
+15 10015 NULL NULL
+20 10020 NULL NULL
+25 10025 NULL NULL
+30 10030 NULL NULL
+35 10035 NULL 35
+40 10040 NULL NULL
+40 10040 NULL NULL
+5 10005 NULL NULL
+50 10050 NULL NULL
+50 10050 NULL NULL
+50 10050 NULL NULL
+60 10040 NULL NULL
+60 10040 NULL NULL
+70 10040 NULL NULL
+70 10040 NULL NULL
+80 10040 NULL NULL
+80 10040 NULL NULL
+NULL 10050 12 NULL
+NULL 10050 NULL NULL
+NULL 35 12 NULL
 NULL 35 NULL NULL
-NULL NULL 10 NULL
-NULL NULL 10 NULL
-NULL NULL 110 NULL
-NULL NULL 110 NULL
-NULL NULL 148 NULL
-NULL NULL 148 NULL
-NULL NULL 48 NULL
-NULL NULL 48 NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
+NULL NULL 12 NULL
 NULL NULL NULL NULL
 PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input2 b ON a.key <=> b.value
 PREHOOK: type: QUERY
@@ -987,47 +1004,35 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_input1
 POSTHOOK: Input: default@smb_input2
 #### A masked pattern was here ####
-10 NULL NULL 10
+100 100 10 100
+100 100 10 100
 100 100 100 100
-110 NULL NULL 110
-200 200 200 200
-NULL 10 10 NULL
-NULL 10 110 NULL
-NULL 10 148 NULL
-NULL 10 48 NULL
-NULL 10 NULL NULL
-NULL 10 NULL NULL
-NULL 110 10 NULL
-NULL 110 110 NULL
-NULL 110 148 NULL
-NULL 110 48 NULL
-NULL 110 NULL NULL
-NULL 110 NULL NULL
-NULL 135 10 NULL
-NULL 135 110 NULL
-NULL 135 148 NULL
-NULL 135 48 NULL
-NULL 135 NULL NULL
-NULL 135 NULL NULL
-NULL 35 10 NULL
-NULL 35 110 NULL
-NULL 35 148 NULL
-NULL 35 48 NULL
-NULL 35 NULL NULL
+100 100 12 100
+35 10035 NULL 35
+NULL 10050 12 NULL
+NULL 10050 NULL NULL
+NULL 35 12 NULL
 NULL 35 NULL NULL
-NULL NULL 10 NULL
-NULL NULL 10 NULL
-NULL NULL 110 NULL
-NULL NULL 110 NULL
-NULL NULL 148 NULL
-NULL NULL 148 NULL
-NULL NULL 48 NULL
-NULL NULL 48 NULL
-NULL NULL NULL 135
-NULL NULL NULL 35
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
+NULL NULL 10 1000
+NULL NULL 12 NULL
+NULL NULL 15 10015
+NULL NULL 20 10020
+NULL NULL 25 10025
+NULL NULL 30 10030
+NULL NULL 35 10035
+NULL NULL 40 10040
+NULL NULL 40 10040
+NULL NULL 5 10005
+NULL NULL 50 10050
+NULL NULL 50 10050
+NULL NULL 50 10050
+NULL NULL 60 10040
+NULL NULL 60 10040
+NULL NULL 70 10040
+NULL NULL 70 10040
+NULL NULL 80 10040
+NULL NULL 80 10040
+NULL NULL NULL 10050
 NULL NULL NULL NULL
 PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a JOIN smb_input2 b ON a.value <=> b.value
 PREHOOK: type: QUERY
@@ -1037,47 +1042,113 @@ POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a JOIN smb_input2 b
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_input2
 #### A masked pattern was here ####
-10 NULL 10 NULL
-10 NULL 110 NULL
-10 NULL 148 NULL
-10 NULL 48 NULL
-10 NULL NULL NULL
-10 NULL NULL NULL
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 100 100
+10 100 100 100
+10 100 12 100
+10 100 12 100
+10 1000 10 1000
+100 100 10 100
+100 100 10 100
 100 100 100 100
-110 NULL 10 NULL
-110 NULL 110 NULL
-110 NULL 148 NULL
-110 NULL 48 NULL
-110 NULL NULL NULL
-110 NULL NULL NULL
-148 NULL 10 NULL
-148 NULL 110 NULL
-148 NULL 148 NULL
-148 NULL 48 NULL
-148 NULL NULL NULL
-148 NULL NULL NULL
-200 200 200 200
-48 NULL 10 NULL
-48 NULL 110 NULL
-48 NULL 148 NULL
-48 NULL 48 NULL
-48 NULL NULL NULL
-48 NULL NULL NULL
-NULL 10 NULL 10
-NULL 110 NULL 110
-NULL 135 NULL 135
+100 100 12 100
+12 100 10 100
+12 100 10 100
+12 100 100 100
+12 100 12 100
+12 NULL 12 NULL
+12 NULL NULL NULL
+15 10015 15 10015
+20 10020 20 10020
+25 10025 25 10025
+30 10030 30 10030
+35 10035 35 10035
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 80 10040
+40 10040 80 10040
+40 10040 80 10040
+40 10040 80 10040
+5 10005 5 10005
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 NULL 10050
+50 10050 NULL 10050
+50 10050 NULL 10050
+60 10040 40 10040
+60 10040 40 10040
+60 10040 40 10040
+60 10040 40 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 80 10040
+60 10040 80 10040
+60 10040 80 10040
+60 10040 80 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 80 10040
+70 10040 80 10040
+70 10040 80 10040
+70 10040 80 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+NULL 10050 50 10050
+NULL 10050 50 10050
+NULL 10050 50 10050
+NULL 10050 NULL 10050
 NULL 35 NULL 35
-NULL NULL 10 NULL
-NULL NULL 10 NULL
-NULL NULL 110 NULL
-NULL NULL 110 NULL
-NULL NULL 148 NULL
-NULL NULL 148 NULL
-NULL NULL 48 NULL
-NULL NULL 48 NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
+NULL NULL 12 NULL
 NULL NULL NULL NULL
 PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a RIGHT OUTER JOIN smb_input2 b ON a.value <=> b.value
 PREHOOK: type: QUERY
@@ -1087,47 +1158,113 @@ POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a RIGHT OUTER JOIN s
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_input2
 #### A masked pattern was here ####
-10 NULL 10 NULL
-10 NULL 110 NULL
-10 NULL 148 NULL
-10 NULL 48 NULL
-10 NULL NULL NULL
-10 NULL NULL NULL
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 100 100
+10 100 100 100
+10 100 12 100
+10 100 12 100
+10 1000 10 1000
+100 100 10 100
+100 100 10 100
 100 100 100 100
-110 NULL 10 NULL
-110 NULL 110 NULL
-110 NULL 148 NULL
-110 NULL 48 NULL
-110 NULL NULL NULL
-110 NULL NULL NULL
-148 NULL 10 NULL
-148 NULL 110 NULL
-148 NULL 148 NULL
-148 NULL 48 NULL
-148 NULL NULL NULL
-148 NULL NULL NULL
-200 200 200 200
-48 NULL 10 NULL
-48 NULL 110 NULL
-48 NULL 148 NULL
-48 NULL 48 NULL
-48 NULL NULL NULL
-48 NULL NULL NULL
-NULL 10 NULL 10
-NULL 110 NULL 110
-NULL 135 NULL 135
+100 100 12 100
+12 100 10 100
+12 100 10 100
+12 100 100 100
+12 100 12 100
+12 NULL 12 NULL
+12 NULL NULL NULL
+15 10015 15 10015
+20 10020 20 10020
+25 10025 25 10025
+30 10030 30 10030
+35 10035 35 10035
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 80 10040
+40 10040 80 10040
+40 10040 80 10040
+40 10040 80 10040
+5 10005 5 10005
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 NULL 10050
+50 10050 NULL 10050
+50 10050 NULL 10050
+60 10040 40 10040
+60 10040 40 10040
+60 10040 40 10040
+60 10040 40 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 80 10040
+60 10040 80 10040
+60 10040 80 10040
+60 10040 80 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 80 10040
+70 10040 80 10040
+70 10040 80 10040
+70 10040 80 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+NULL 10050 50 10050
+NULL 10050 50 10050
+NULL 10050 50 10050
+NULL 10050 NULL 10050
 NULL 35 NULL 35
-NULL NULL 10 NULL
-NULL NULL 10 NULL
-NULL NULL 110 NULL
-NULL NULL 110 NULL
-NULL NULL 148 NULL
-NULL NULL 148 NULL
-NULL NULL 48 NULL
-NULL NULL 48 NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
+NULL NULL 12 NULL
 NULL NULL NULL NULL
 PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a JOIN smb_input2 b ON a.value <=> b.value
 PREHOOK: type: QUERY
@@ -1137,47 +1274,113 @@ POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a JOIN smb_input2 b
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_input2
 #### A masked pattern was here ####
-10 NULL 10 NULL
-10 NULL 110 NULL
-10 NULL 148 NULL
-10 NULL 48 NULL
-10 NULL NULL NULL
-10 NULL NULL NULL
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 100 100
+10 100 100 100
+10 100 12 100
+10 100 12 100
+10 1000 10 1000
+100 100 10 100
+100 100 10 100
 100 100 100 100
-110 NULL 10 NULL
-110 NULL 110 NULL
-110 NULL 148 NULL
-110 NULL 48 NULL
-110 NULL NULL NULL
-110 NULL NULL NULL
-148 NULL 10 NULL
-148 NULL 110 NULL
-148 NULL 148 NULL
-148 NULL 48 NULL
-148 NULL NULL NULL
-148 NULL NULL NULL
-200 200 200 200
-48 NULL 10 NULL
-48 NULL 110 NULL
-48 NULL 148 NULL
-48 NULL 48 NULL
-48 NULL NULL NULL
-48 NULL NULL NULL
-NULL 10 NULL 10
-NULL 110 NULL 110
-NULL 135 NULL 135
+100 100 12 100
+12 100 10 100
+12 100 10 100
+12 100 100 100
+12 100 12 100
+12 NULL 12 NULL
+12 NULL NULL NULL
+15 10015 15 10015
+20 10020 20 10020
+25 10025 25 10025
+30 10030 30 10030
+35 10035 35 10035
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 80 10040
+40 10040 80 10040
+40 10040 80 10040
+40 10040 80 10040
+5 10005 5 10005
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 NULL 10050
+50 10050 NULL 10050
+50 10050 NULL 10050
+60 10040 40 10040
+60 10040 40 10040
+60 10040 40 10040
+60 10040 40 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 80 10040
+60 10040 80 10040
+60 10040 80 10040
+60 10040 80 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 80 10040
+70 10040 80 10040
+70 10040 80 10040
+70 10040 80 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+NULL 10050 50 10050
+NULL 10050 50 10050
+NULL 10050 50 10050
+NULL 10050 NULL 10050
 NULL 35 NULL 35
-NULL NULL 10 NULL
-NULL NULL 10 NULL
-NULL NULL 110 NULL
-NULL NULL 110 NULL
-NULL NULL 148 NULL
-NULL NULL 148 NULL
-NULL NULL 48 NULL
-NULL NULL 48 NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
+NULL NULL 12 NULL
 NULL NULL NULL NULL
 PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a LEFT OUTER JOIN smb_input2 b ON a.value <=> b.value
 PREHOOK: type: QUERY
@@ -1187,47 +1390,113 @@ POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a LEFT OUTER JOIN sm
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_input2
 #### A masked pattern was here ####
-10 NULL 10 NULL
-10 NULL 110 NULL
-10 NULL 148 NULL
-10 NULL 48 NULL
-10 NULL NULL NULL
-10 NULL NULL NULL
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 100 100
+10 100 100 100
+10 100 12 100
+10 100 12 100
+10 1000 10 1000
+100 100 10 100
+100 100 10 100
 100 100 100 100
-110 NULL 10 NULL
-110 NULL 110 NULL
-110 NULL 148 NULL
-110 NULL 48 NULL
-110 NULL NULL NULL
-110 NULL NULL NULL
-148 NULL 10 NULL
-148 NULL 110 NULL
-148 NULL 148 NULL
-148 NULL 48 NULL
-148 NULL NULL NULL
-148 NULL NULL NULL
-200 200 200 200
-48 NULL 10 NULL
-48 NULL 110 NULL
-48 NULL 148 NULL
-48 NULL 48 NULL
-48 NULL NULL NULL
-48 NULL NULL NULL
-NULL 10 NULL 10
-NULL 110 NULL 110
-NULL 135 NULL 135
+100 100 12 100
+12 100 10 100
+12 100 10 100
+12 100 100 100
+12 100 12 100
+12 NULL 12 NULL
+12 NULL NULL NULL
+15 10015 15 10015
+20 10020 20 10020
+25 10025 25 10025
+30 10030 30 10030
+35 10035 35 10035
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 80 10040
+40 10040 80 10040
+40 10040 80 10040
+40 10040 80 10040
+5 10005 5 10005
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 NULL 10050
+50 10050 NULL 10050
+50 10050 NULL 10050
+60 10040 40 10040
+60 10040 40 10040
+60 10040 40 10040
+60 10040 40 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 80 10040
+60 10040 80 10040
+60 10040 80 10040
+60 10040 80 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 80 10040
+70 10040 80 10040
+70 10040 80 10040
+70 10040 80 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+NULL 10050 50 10050
+NULL 10050 50 10050
+NULL 10050 50 10050
+NULL 10050 NULL 10050
 NULL 35 NULL 35
-NULL NULL 10 NULL
-NULL NULL 10 NULL
-NULL NULL 110 NULL
-NULL NULL 110 NULL
-NULL NULL 148 NULL
-NULL NULL 148 NULL
-NULL NULL 48 NULL
-NULL NULL 48 NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
-NULL NULL NULL NULL
+NULL NULL 12 NULL
 NULL NULL NULL NULL
 PREHOOK: query: --HIVE-3315 join predicate transitive
 explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.key is NULL
diff --git ql/src/test/results/clientpositive/overridden_confs.q.out ql/src/test/results/clientpositive/overridden_confs.q.out
index f97d1ad..b86521b 100644
--- ql/src/test/results/clientpositive/overridden_confs.q.out
+++ ql/src/test/results/clientpositive/overridden_confs.q.out
@@ -3,5 +3,5 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 #### A masked pattern was here ####
 Key: hive.exec.post.hooks, Value: org.apache.hadoop.hive.ql.hooks.VerifyOverriddenConfigsHook
-Key: hive.config.doesnt.exit, Value: abc
+Key: some.hive.config.doesnt.exit, Value: abc
 500
diff --git ql/src/test/results/clientpositive/stats15.q.out ql/src/test/results/clientpositive/stats15.q.out
index 80e1c6d..5dcbc5a 100644
--- ql/src/test/results/clientpositive/stats15.q.out
+++ ql/src/test/results/clientpositive/stats15.q.out
@@ -45,7 +45,7 @@ Table Parameters:
 COLUMN_STATS_ACCURATE true
 numFiles 1
 numRows 500
- rawDataSize 5312
+ rawDataSize 0
 totalSize 5812
 #### A masked pattern was here ####
@@ -185,7 +185,7 @@ Partition Parameters:
 COLUMN_STATS_ACCURATE true
 numFiles 1
 numRows 500
- rawDataSize 5312
+ rawDataSize 0
 totalSize 5812
 #### A masked pattern was here ####
@@ -227,7 +227,7 @@ Partition Parameters:
 COLUMN_STATS_ACCURATE true
 numFiles 1
 numRows 500
- rawDataSize 5312
+ rawDataSize 0
 totalSize 5812
 #### A masked pattern was here ####
diff --git ql/src/test/results/clientpositive/udtf_explode.q.out ql/src/test/results/clientpositive/udtf_explode.q.out
index 45de313..ae95907 100644
--- ql/src/test/results/clientpositive/udtf_explode.q.out
+++ ql/src/test/results/clientpositive/udtf_explode.q.out
@@ -764,25 +764,3 @@ POSTHOOK: Input: default@src
 0 238 1 one
 0 238 2 two
 0 238 3 three
-PREHOOK: query: SELECT src.key, myKey, myVal FROM src lateral view explode(map(1,'one',2,'two',3,'three')) x AS myKey,myVal LIMIT 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT src.key, myKey, myVal FROM src lateral view explode(map(1,'one',2,'two',3,'three')) x AS myKey,myVal LIMIT 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-238 1 one
-238 2 two
-238 3 three
-PREHOOK: query: SELECT BLOCK__OFFSET__INSIDE__FILE, src.key, myKey, myVal FROM src lateral view explode(map(1,'one',2,'two',3,'three')) x AS myKey,myVal LIMIT 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT BLOCK__OFFSET__INSIDE__FILE, src.key, myKey, myVal FROM src lateral view explode(map(1,'one',2,'two',3,'three')) x AS myKey,myVal LIMIT 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-0 238 1 one
-0 238 2 two
-0 238 3 three
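The join_nullsafe.q.out changes above reflect the new data-loading strategy for the sort-merge-bucket tests: rows are staged once in the unbucketed smb_input table and then written to smb_input1 and smb_input2 with INSERT OVERWRITE ... SELECT. LOAD DATA only moves files into the table directory and neither clusters nor sorts rows, so routing the data through an insert is what gives the CLUSTERED BY ... SORTED BY tables properly bucketed input for the SMB map joins. The operator these queries exercise, <=>, is Hive's NULL-safe equality: it evaluates to true when both operands are NULL, whereas plain = evaluates to NULL and the row falls out of an inner join. A minimal sketch of the difference (the ns_demo table and its rows are illustrative, not part of this patch):

-- NULL-safe equality: NULL <=> NULL is true, while NULL = NULL is NULL.
CREATE TABLE ns_demo (key INT, value INT);
-- Suppose ns_demo holds (1, 10), (NULL, 20), (NULL, NULL).
SELECT a.key, a.value, b.key, b.value
FROM ns_demo a JOIN ns_demo b ON a.key <=> b.key;
-- Returns the (1, 10) self-pair plus all four pairings of the two NULL-keyed rows.
SELECT a.key, a.value, b.key, b.value
FROM ns_demo a JOIN ns_demo b ON a.key = b.key;
-- Returns only the (1, 10) self-pair; NULL keys never satisfy plain =.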