diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 67e22f6649..01e9e04e5f 100644 --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -597,7 +597,7 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "When hive.exec.mode.local.auto is true, the number of tasks should less than this for local mode."), DROPIGNORESNONEXISTENT("hive.exec.drop.ignorenonexistent", true, - "Do not report an error if DROP TABLE/VIEW/Index/Function specifies a non-existent table/view/index/function"), + "Do not report an error if DROP TABLE/VIEW/Index/Function specifies a non-existent table/view/function"), HIVEIGNOREMAPJOINHINT("hive.ignore.mapjoin.hint", true, "Ignore the mapjoin hint"), @@ -1615,10 +1615,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "Whether to provide the row offset virtual column"), // Optimizer - HIVEOPTINDEXFILTER("hive.optimize.index.filter", false, - "Whether to enable automatic use of indexes"), - HIVEINDEXAUTOUPDATE("hive.optimize.index.autoupdate", false, - "Whether to update stale indexes automatically"), HIVEOPTPPD("hive.optimize.ppd", true, "Whether to enable predicate pushdown"), HIVEOPTPPD_WINDOWING("hive.optimize.ppd.windowing", true, @@ -1756,18 +1752,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "If the number of references to a CTE clause exceeds this threshold, Hive will materialize it\n" + "before executing the main query block. -1 will disable this feature."), - // Indexes - HIVEOPTINDEXFILTER_COMPACT_MINSIZE("hive.optimize.index.filter.compact.minsize", (long) 5 * 1024 * 1024 * 1024, - "Minimum size (in bytes) of the inputs on which a compact index is automatically used."), // 5G - HIVEOPTINDEXFILTER_COMPACT_MAXSIZE("hive.optimize.index.filter.compact.maxsize", (long) -1, - "Maximum size (in bytes) of the inputs on which a compact index is automatically used. A negative number is equivalent to infinity."), // infinity - HIVE_INDEX_COMPACT_QUERY_MAX_ENTRIES("hive.index.compact.query.max.entries", (long) 10000000, - "The maximum number of index entries to read during a query that uses the compact index. Negative value is equivalent to infinity."), // 10M - HIVE_INDEX_COMPACT_QUERY_MAX_SIZE("hive.index.compact.query.max.size", (long) 10 * 1024 * 1024 * 1024, - "The maximum number of bytes that a query using the compact index can read. 
Negative value is equivalent to infinity."), // 10G - HIVE_INDEX_COMPACT_BINARY_SEARCH("hive.index.compact.binary.search", true, - "Whether or not to use a binary search to find the entries in an index table that match the filter, where possible"), - // Statistics HIVE_STATS_ESTIMATE_STATS("hive.stats.estimate", true, "Estimate statistics in absence of statistics."), @@ -2135,9 +2119,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal // For har files HIVEARCHIVEENABLED("hive.archive.enabled", false, "Whether archiving operations are permitted"), - HIVEOPTGBYUSINGINDEX("hive.optimize.index.groupby", false, - "Whether to enable optimization of group-by queries using Aggregate indexes."), - HIVEFETCHTASKCONVERSION("hive.fetch.task.conversion", "more", new StringSet("none", "minimal", "more"), "Some select queries can be converted to single FETCH task minimizing latency.\n" + "Currently the query should be single sourced not having any subquery and should not have\n" + @@ -2261,12 +2242,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal HIVE_ERROR_ON_EMPTY_PARTITION("hive.error.on.empty.partition", false, "Whether to throw an exception if dynamic partition insert generates empty results."), - HIVE_INDEX_COMPACT_FILE("hive.index.compact.file", "", "internal variable"), - HIVE_INDEX_BLOCKFILTER_FILE("hive.index.blockfilter.file", "", "internal variable"), - HIVE_INDEX_IGNORE_HDFS_LOC("hive.index.compact.file.ignore.hdfs", false, - "When true the HDFS location stored in the index file will be ignored at runtime.\n" + - "If the data got moved or the name of the cluster got changed, the index data should still be usable."), - HIVE_EXIM_URI_SCHEME_WL("hive.exim.uri.scheme.whitelist", "hdfs,pfile,file,s3,s3a", "A comma separated list of acceptable URI schemes for import and export."), // temporary variable for testing. This is added just to turn off this feature in case of a bug in @@ -2285,12 +2260,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal HIVE_REWORK_MAPREDWORK("hive.rework.mapredwork", false, "should rework the mapred work or not.\n" + "This is first introduced by SymlinkTextInputFormat to replace symlink files with real paths at compile time."), - HIVE_CONCATENATE_CHECK_INDEX ("hive.exec.concatenate.check.index", true, - "If this is set to true, Hive will throw error when doing\n" + - "'alter table tbl_name [partSpec] concatenate' on a table/partition\n" + - "that has indexes on it. The reason the user want to set this to true\n" + - "is because it can help user to avoid handling all index drop, recreation,\n" + - "rebuild work. This is very helpful for tables with thousands of partitions."), HIVE_IO_EXCEPTION_HANDLERS("hive.io.exception.handlers", "", "A list of io exception handler class names. This is used\n" + "to construct a list exception handlers to handle exceptions thrown\n" + @@ -3069,7 +3038,7 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "hive.tez.bucket.pruning", false, "When pruning is enabled, filters on bucket columns will be processed by \n" + "filtering the splits against a bitset of included buckets. 
This needs predicates \n"+ - "produced by hive.optimize.ppd and hive.optimize.index.filters."), + "produced by hive.optimize.ppd."), TEZ_OPTIMIZE_BUCKET_PRUNING_COMPAT( "hive.tez.bucket.pruning.compat", true, "When pruning is enabled, handle possibly broken inserts due to negative hashcodes.\n" + @@ -4612,7 +4581,6 @@ public ZoneId getLocalTimeZone() { ConfVars.HIVE_CHECK_CROSS_PRODUCT.varname, ConfVars.HIVE_CLI_TEZ_SESSION_ASYNC.varname, ConfVars.HIVE_COMPAT.varname, - ConfVars.HIVE_CONCATENATE_CHECK_INDEX.varname, ConfVars.HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY.varname, ConfVars.HIVE_ERROR_ON_EMPTY_PARTITION.varname, ConfVars.HIVE_EXECUTION_ENGINE.varname, diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java index bec1f2663b..8105e8ba54 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java @@ -90,12 +90,6 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast) case HiveParser.TOK_DESCDATABASE: case HiveParser.TOK_ALTERDATABASE_PROPERTIES: - // Index DDL - case HiveParser.TOK_ALTERINDEX_PROPERTIES: - case HiveParser.TOK_CREATEINDEX: - case HiveParser.TOK_DROPINDEX: - case HiveParser.TOK_SHOWINDEXES: - // View DDL // "alter view add partition" does not work because of the nature of implementation // of the DDL in hive. Hive will internally invoke another Driver on the select statement, @@ -174,11 +168,6 @@ public void postAnalyze(HiveSemanticAnalyzerHookContext context, case HiveParser.TOK_DESCDATABASE: case HiveParser.TOK_ALTERDATABASE_PROPERTIES: - // Index DDL - case HiveParser.TOK_ALTERINDEX_PROPERTIES: - case HiveParser.TOK_CREATEINDEX: - case HiveParser.TOK_DROPINDEX: - case HiveParser.TOK_SHOWINDEXES: break; // View DDL diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java index 63a731316e..de3383355b 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.ql.exec.mr.ExecDriver; import org.apache.hadoop.hive.ql.metadata.*; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; @@ -240,24 +239,6 @@ private Table createTableAndCheck(Table baseTable, String tableName, String tabl return table; } - private void createIndexAndCheck(Table table, String indexName, String indexLocation) throws Exception { - executeQuery("CREATE INDEX " + indexName + " ON TABLE " + table.getTableName() - + " (col1) AS 'COMPACT' WITH DEFERRED REBUILD " - + buildLocationClause(indexLocation)); - Index index = db.getIndex(table.getTableName(), indexName); - assertNotNull("Index object is expected for " + indexName , index); - String location = index.getSd().getLocation(); - if (indexLocation != null) { - assertEquals("Index should be located in the second filesystem", - 
fs2.makeQualified(new Path(indexLocation)).toString(), location); - } - else { - // Since warehouse path is non-qualified the index should be located on second filesystem - assertEquals("Index should be located in the second filesystem", - fs2.getUri().getScheme(), new URI(location).getScheme()); - } - } - private void createDatabaseAndCheck(String databaseName, String databaseLocation) throws Exception { executeQuery("CREATE DATABASE " + databaseName + buildLocationClause(databaseLocation)); Database database = db.getDatabase(databaseName); @@ -274,41 +255,6 @@ private void createDatabaseAndCheck(String databaseName, String databaseLocation } } - public void testCreateTableWithIndexAndPartitionsNonDefaultNameNode() throws Exception { - assertTrue("Test suite should be initialied", isInitialized ); - final String tableLocation = tmppathFs2 + "/" + Table1Name; - final String table5Location = tmppathFs2 + "/" + Table5Name; - final String indexLocation = tmppathFs2 + "/" + Index1Name; - final String partition3Location = fs.makeQualified(new Path(tmppath + "/p3")).toString(); - - // Create table with absolute non-qualified path - Table table1 = createTableAndCheck(Table1Name, tableLocation); - - // Create table without location - createTableAndCheck(Table2Name, null); - - // Add partition without location - addPartitionAndCheck(table1, "p", "p1", null); - - // Add partition with absolute location - addPartitionAndCheck(table1, "p", "p2", tableLocation + "/p2"); - - // Add partition with qualified location in default fs - addPartitionAndCheck(table1, "p", "p3", partition3Location); - - // Create index with absolute non-qualified path - createIndexAndCheck(table1, Index1Name, indexLocation); - - // Create index with absolute non-qualified path - createIndexAndCheck(table1, Index2Name, null); - - // Create table like Table1Name absolute non-qualified path - createTableAndCheck(table1, Table5Name, table5Location); - - // Create table without location - createTableAndCheck(table1, Table6Name, null); - } - public void testAlterPartitionSetLocationNonDefaultNameNode() throws Exception { assertTrue("Test suite should have been initialized", isInitialized); String tableLocation = tmppathFs2 + "/" + "test_set_part_loc"; diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java index fcce531e59..6cd7a136ae 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java +++ itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java @@ -95,12 +95,10 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hive.druid.MiniDruidCluster; -import org.apache.hive.testutils.HiveTestEnvSetup; import org.apache.hadoop.hive.llap.LlapItUtils; import org.apache.hadoop.hive.llap.daemon.MiniLlapCluster; import org.apache.hadoop.hive.llap.io.api.LlapProxy; import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; import org.apache.hadoop.hive.ql.exec.Task; @@ -966,14 +964,6 @@ public void clearTablesCreatedDuringTests() throws Exception { continue; } db.dropTable(dbName, tblName, true, true, fsType == FsType.encrypted_hdfs); - } else { - // this table is defined in srcTables, drop all indexes on it - List indexes = db.getIndexes(dbName, tblName, (short)-1); - if (indexes != 
null && indexes.size() > 0) { - for (Index index : indexes) { - db.dropIndex(dbName, tblName, index.getIndexName(), true, true); - } - } } } if (!DEFAULT_DATABASE_NAME.equals(dbName)) { diff --git ql/src/java/org/apache/hadoop/hive/ql/Driver.java ql/src/java/org/apache/hadoop/hive/ql/Driver.java index c6f7d6459e..1471adde7d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -797,7 +797,6 @@ private boolean startImplicitTxn(HiveTxnManager txnManager) throws LockException case SHOWTABLES: case SHOWCOLUMNS: case SHOWFUNCTIONS: - case SHOWINDEXES: case SHOWPARTITIONS: case SHOWLOCKS: case SHOWVIEWS: diff --git ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java index 134faeeec9..758ae4396a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java +++ ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java @@ -62,7 +62,6 @@ //========================== 10000 range starts here ========================// INVALID_TABLE(10001, "Table not found", "42S02"), INVALID_COLUMN(10002, "Invalid column reference"), - INVALID_INDEX(10003, "Invalid index"), INVALID_TABLE_OR_COLUMN(10004, "Invalid table alias or column reference"), AMBIGUOUS_TABLE_OR_COLUMN(10005, "Ambiguous table alias or column reference"), INVALID_PARTITION(10006, "Partition not found"), @@ -326,7 +325,6 @@ TABLES_INCOMPATIBLE_SCHEMAS(10235, "Tables have incompatible schemas and their partitions " + " cannot be exchanged."), - TRUNCATE_COLUMN_INDEXED_TABLE(10236, "Can not truncate columns from table with indexes"), TRUNCATE_COLUMN_NOT_RC(10237, "Only RCFileFormat supports column truncation."), TRUNCATE_COLUMN_ARCHIVED(10238, "Column truncation cannot be performed on archived partitions."), TRUNCATE_BUCKETED_COLUMN(10239, @@ -426,7 +424,6 @@ "Grouping sets aggregations (with rollups or cubes) are not allowed when " + "HIVEMULTIGROUPBYSINGLEREDUCER is turned on. 
Set hive.multigroupby.singlereducer=false if you want to use grouping sets"), CANNOT_RETRIEVE_TABLE_METADATA(10316, "Error while retrieving table metadata"), - CANNOT_DROP_INDEX(10317, "Error while dropping index"), INVALID_AST_TREE(10318, "Internal error : Invalid AST"), ERROR_SERIALIZE_METASTORE(10319, "Error while serializing the metastore objects"), IO_ERROR(10320, "Error while performing IO operation "), diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 20c2c3294a..9356a30bf3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -51,7 +51,6 @@ import java.util.concurrent.ExecutionException; import com.google.common.collect.ImmutableSet; -import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; @@ -61,7 +60,6 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.common.ValidReadTxnList; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.conf.Constants; @@ -76,7 +74,6 @@ import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; -import org.apache.hadoop.hive.metastore.api.BasicTxnInfo; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.CompactionResponse; @@ -85,7 +82,6 @@ import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; -import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -108,7 +104,6 @@ import org.apache.hadoop.hive.metastore.api.TxnInfo; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus; import org.apache.hadoop.hive.metastore.api.WMTrigger; import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; @@ -150,7 +145,6 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry; import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreChecker; -import org.apache.hadoop.hive.ql.metadata.HiveUtils; import org.apache.hadoop.hive.ql.metadata.InvalidTableException; import org.apache.hadoop.hive.ql.metadata.NotNullConstraint; import org.apache.hadoop.hive.ql.metadata.Partition; @@ -172,7 +166,6 @@ import org.apache.hadoop.hive.ql.plan.AbortTxnsDesc; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.AlterIndexDesc; import org.apache.hadoop.hive.ql.plan.AlterMaterializedViewDesc; import org.apache.hadoop.hive.ql.plan.AlterResourcePlanDesc; 
import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc; @@ -184,7 +177,6 @@ import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc; import org.apache.hadoop.hive.ql.plan.ColStatistics; import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc; import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc; import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc; @@ -198,7 +190,6 @@ import org.apache.hadoop.hive.ql.plan.DescFunctionDesc; import org.apache.hadoop.hive.ql.plan.DescTableDesc; import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.DropIndexDesc; import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc; @@ -232,7 +223,6 @@ import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; import org.apache.hadoop.hive.ql.plan.ShowGrantDesc; -import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; import org.apache.hadoop.hive.ql.plan.ShowResourcePlanDesc; @@ -394,21 +384,6 @@ public int execute(DriverContext driverContext) { return createTable(db, crtTbl); } - CreateIndexDesc crtIndex = work.getCreateIndexDesc(); - if (crtIndex != null) { - return createIndex(db, crtIndex); - } - - AlterIndexDesc alterIndex = work.getAlterIndexDesc(); - if (alterIndex != null) { - return alterIndex(db, alterIndex); - } - - DropIndexDesc dropIdx = work.getDropIdxDesc(); - if (dropIdx != null) { - return dropIndex(db, dropIdx); - } - CreateTableLikeDesc crtTblLike = work.getCreateTblLikeDesc(); if (crtTblLike != null) { return createTableLike(db, crtTblLike); @@ -589,11 +564,6 @@ public int execute(DriverContext driverContext) { return grantOrRevokeRole(db, grantOrRevokeRoleDDL); } - ShowIndexesDesc showIndexes = work.getShowIndexesDesc(); - if (showIndexes != null) { - return showIndexes(db, showIndexes); - } - AlterTablePartMergeFilesDesc mergeFilesDesc = work.getMergeFilesDesc(); if (mergeFilesDesc != null) { return mergeFiles(db, mergeFilesDesc, driverContext); @@ -743,8 +713,12 @@ private int alterResourcePlan(Hive db, AlterResourcePlanDesc desc) throws HiveEx if (!mustHaveAppliedChange && !desc.isReplace()) { return 0; // The modification cannot affect an active plan. } - if (appliedRp == null && !mustHaveAppliedChange) return 0; // Replacing an inactive plan. - if (wm == null && isInTest) return 0; // Skip for tests if WM is not present. + if (appliedRp == null && !mustHaveAppliedChange) { + return 0; // Replacing an inactive plan. + } + if (wm == null && isInTest) { + return 0; // Skip for tests if WM is not present. 
+ } if ((appliedRp == null) != desc.isForceDeactivate()) { throw new HiveException("Cannot get a resource plan to apply; or non-null plan on disable"); @@ -1250,134 +1224,6 @@ private int alterDatabase(Hive db, AlterDatabaseDesc alterDbDesc) throws HiveExc return 0; } - private int dropIndex(Hive db, DropIndexDesc dropIdx) throws HiveException { - - if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) { - throw new UnsupportedOperationException("Indexes unsupported for Tez execution engine"); - } - - db.dropIndex(dropIdx.getTableName(), dropIdx.getIndexName(), dropIdx.isThrowException(), true); - return 0; - } - - private int createIndex(Hive db, CreateIndexDesc crtIndex) throws HiveException { - - if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) { - throw new UnsupportedOperationException("Indexes unsupported for Tez execution engine"); - } - - if( crtIndex.getSerde() != null) { - validateSerDe(crtIndex.getSerde()); - } - - String indexTableName = crtIndex.getIndexTableName(); - // If location is specified - ensure that it is a full qualified name - makeLocationQualified(crtIndex, indexTableName); - - db - .createIndex( - crtIndex.getTableName(), crtIndex.getIndexName(), crtIndex.getIndexTypeHandlerClass(), - crtIndex.getIndexedCols(), crtIndex.getIndexTableName(), crtIndex.getDeferredRebuild(), - crtIndex.getInputFormat(), crtIndex.getOutputFormat(), crtIndex.getSerde(), - crtIndex.getStorageHandler(), crtIndex.getLocation(), crtIndex.getIdxProps(), crtIndex.getTblProps(), - crtIndex.getSerdeProps(), crtIndex.getCollItemDelim(), crtIndex.getFieldDelim(), crtIndex.getFieldEscape(), - crtIndex.getLineDelim(), crtIndex.getMapKeyDelim(), crtIndex.getIndexComment() - ); - if (HiveUtils.getIndexHandler(conf, crtIndex.getIndexTypeHandlerClass()).usesIndexTable()) { - Table indexTable = db.getTable(indexTableName); - addIfAbsentByName(new WriteEntity(indexTable, WriteEntity.WriteType.DDL_NO_LOCK)); - } - return 0; - } - - private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException { - - if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) { - throw new UnsupportedOperationException("Indexes unsupported for Tez execution engine"); - } - - String baseTableName = alterIndex.getBaseTableName(); - String indexName = alterIndex.getIndexName(); - Index idx = db.getIndex(baseTableName, indexName); - - switch(alterIndex.getOp()) { - case ADDPROPS: - idx.getParameters().putAll(alterIndex.getProps()); - break; - case UPDATETIMESTAMP: - try { - Map props = new HashMap(); - Map, Long> basePartTs = new HashMap, Long>(); - - Table baseTbl = db.getTable(baseTableName); - - if (baseTbl.isPartitioned()) { - List baseParts; - if (alterIndex.getSpec() != null) { - baseParts = db.getPartitions(baseTbl, alterIndex.getSpec()); - } else { - baseParts = db.getPartitions(baseTbl); - } - if (baseParts != null) { - for (Partition p : baseParts) { - Path dataLocation = p.getDataLocation(); - FileSystem fs = dataLocation.getFileSystem(db.getConf()); - FileStatus fss = fs.getFileStatus(dataLocation); - long lastModificationTime = fss.getModificationTime(); - - FileStatus[] parts = fs.listStatus(dataLocation, FileUtils.HIDDEN_FILES_PATH_FILTER); - if (parts != null && parts.length > 0) { - for (FileStatus status : parts) { - if (status.getModificationTime() > lastModificationTime) { - lastModificationTime = status.getModificationTime(); - } - } - } - basePartTs.put(p.getSpec(), lastModificationTime); - } - } - } else 
{ - FileSystem fs = baseTbl.getPath().getFileSystem(db.getConf()); - FileStatus fss = fs.getFileStatus(baseTbl.getPath()); - basePartTs.put(null, fss.getModificationTime()); - } - for (Map spec : basePartTs.keySet()) { - if (spec != null) { - props.put(spec.toString(), basePartTs.get(spec).toString()); - } else { - props.put("base_timestamp", basePartTs.get(null).toString()); - } - } - idx.getParameters().putAll(props); - } catch (HiveException e) { - throw new HiveException("ERROR: Failed to update index timestamps"); - } catch (IOException e) { - throw new HiveException("ERROR: Failed to look up timestamps on filesystem"); - } - - break; - default: - console.printError("Unsupported Alter command"); - return 1; - } - - // set last modified by properties - if (!updateModifiedParameters(idx.getParameters(), conf)) { - return 1; - } - - try { - db.alterIndex(baseTableName, indexName, idx); - } catch (InvalidOperationException e) { - console.printError("Invalid alter operation: " + e.getMessage()); - LOG.info("alter index: ", e); - return 1; - } catch (HiveException e) { - console.printError("Invalid alter operation: " + e.getMessage()); - return 1; - } - return 0; - } /** * Alters a materialized view. @@ -2782,57 +2628,6 @@ private StringBuilder appendSerdeParams(StringBuilder builder, Map indexes = null; - - tbl = db.getTable(tableName); - - indexes = db.getIndexes(tbl.getDbName(), tbl.getTableName(), (short) -1); - - // In case the query is served by HiveServer2, don't pad it with spaces, - // as HiveServer2 output is consumed by JDBC/ODBC clients. - boolean isOutputPadded = !SessionState.get().isHiveServerQuery(); - - // write the results in the file - DataOutputStream outStream = getOutputStream(showIndexes.getResFile()); - try { - if (showIndexes.isFormatted()) { - // column headers - outStream.write(MetaDataFormatUtils.getIndexColumnsHeader().getBytes(StandardCharsets.UTF_8)); - } - - for (Index index : indexes) - { - outStream.write(MetaDataFormatUtils.getIndexInformation(index, isOutputPadded).getBytes(StandardCharsets.UTF_8)); - } - } catch (FileNotFoundException e) { - LOG.info("show indexes: ", e); - throw new HiveException(e.toString()); - } catch (IOException e) { - LOG.info("show indexes: ", e); - throw new HiveException(e.toString()); - } catch (Exception e) { - throw new HiveException(e.toString()); - } finally { - IOUtils.closeStream(outStream); - } - - return 0; - } /** * Write a list of the available databases to a file. @@ -5294,37 +5089,6 @@ public static void makeLocationQualified(String databaseName, StorageDescriptor } /** - * Make qualified location for an index . - * - * @param crtIndex - * Create index descriptor. - * @param name - * Object name. 
- */ - private void makeLocationQualified(CreateIndexDesc crtIndex, String name) throws HiveException - { - Path path = null; - if (crtIndex.getLocation() == null) { - // Location is not set, leave it as-is if index doesn't belong to default DB - // Currently all indexes are created in current DB only - if (Utilities.getDatabaseName(name).equalsIgnoreCase(Warehouse.DEFAULT_DATABASE_NAME)) { - // Default database name path is always ignored, use METASTOREWAREHOUSE and object name - // instead - String warehouse = HiveConf.getVar(conf, ConfVars.METASTOREWAREHOUSE); - String tableName = Utilities.getTableName(name); - path = new Path(warehouse, tableName.toLowerCase()); - } - } - else { - path = new Path(crtIndex.getLocation()); - } - - if (path != null) { - crtIndex.setLocation(Utilities.getQualifiedPath(conf, path)); - } - } - - /** * Make qualified location for a database . * * @param database diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java index f7801bb5a4..32fc257b03 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java @@ -383,10 +383,6 @@ system.registerGenericUDF("between", GenericUDFBetween.class); system.registerGenericUDF("in_bloom_filter", GenericUDFInBloomFilter.class); - system.registerGenericUDF("ewah_bitmap_and", GenericUDFEWAHBitmapAnd.class); - system.registerGenericUDF("ewah_bitmap_or", GenericUDFEWAHBitmapOr.class); - system.registerGenericUDF("ewah_bitmap_empty", GenericUDFEWAHBitmapEmpty.class); - // Utility UDFs system.registerUDF("version", UDFVersion.class, false); @@ -447,8 +443,6 @@ system.registerGenericUDAF("ngrams", new GenericUDAFnGrams()); system.registerGenericUDAF("context_ngrams", new GenericUDAFContextNGrams()); - system.registerGenericUDAF("ewah_bitmap", new GenericUDAFEWAHBitmap()); - system.registerGenericUDAF("compute_stats", new GenericUDAFComputeStats()); system.registerGenericUDAF("bloom_filter", new GenericUDAFBloomFilter()); system.registerUDAF("percentile", UDAFPercentile.class); @@ -1661,7 +1655,9 @@ public static FunctionInfo registerPermanentFunction(String functionName, public static boolean isPermanentFunction(ExprNodeGenericFuncDesc fnExpr) { GenericUDF udf = fnExpr.getGenericUDF(); - if (udf == null) return false; + if (udf == null) { + return false; + } Class clazz = udf.getClass(); if (udf instanceof GenericUDFBridge) { @@ -1787,7 +1783,9 @@ public static boolean isRankingFunction(String name) throws SemanticException { */ public static boolean isBuiltInFuncExpr(ExprNodeGenericFuncDesc fnExpr) { GenericUDF udf = fnExpr.getGenericUDF(); - if (udf == null) return false; + if (udf == null) { + return false; + } Class clazz = udf.getClass(); if (udf instanceof GenericUDFBridge) { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java index 5d2c759b32..c2959d992c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java @@ -197,33 +197,6 @@ public static void setChildrenCollector(List> c return lastOp; } - /** - * Starting at the input operator, finds the last operator upstream that is - * an instance of the input class. 
- * - * @param op the starting operator - * @param clazz the class that the operator that we are looking for instantiates - * @return null if no such operator exists or multiple branches are found in - * the stream, the last operator otherwise - */ - @SuppressWarnings("unchecked") - public static T findLastOperatorUpstream(Operator op, Class clazz) { - Operator currentOp = op; - T lastOp = null; - while (currentOp != null) { - if (clazz.isInstance(currentOp)) { - lastOp = (T) currentOp; - } - if (currentOp.getParentOperators().size() == 1) { - currentOp = currentOp.getParentOperators().get(0); - } - else { - currentOp = null; - } - } - return lastOp; - } - public static void iterateParents(Operator operator, Function> function) { iterateParents(operator, function, new HashSet>()); } @@ -240,10 +213,6 @@ private static void iterateParents(Operator operator, Function> f } } - public static boolean sameRowSchema(Operator operator1, Operator operator2) { - return operator1.getSchema().equals(operator2.getSchema()); - } - /** * Given an operator and a set of classes, it classifies the operators it finds * in the stream depending on the classes they instantiate. diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java index 85cef86646..83590e2176 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java @@ -33,8 +33,6 @@ import org.apache.hadoop.hive.ql.exec.repl.bootstrap.ReplLoadWork; import org.apache.hadoop.hive.ql.exec.spark.SparkTask; import org.apache.hadoop.hive.ql.exec.tez.TezTask; -import org.apache.hadoop.hive.ql.index.IndexMetadataChangeTask; -import org.apache.hadoop.hive.ql.index.IndexMetadataChangeWork; import org.apache.hadoop.hive.ql.io.merge.MergeFileTask; import org.apache.hadoop.hive.ql.io.merge.MergeFileWork; import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork; @@ -104,8 +102,6 @@ public TaskTuple(Class workClass, Class> taskClass) { DependencyCollectionTask.class)); taskvec.add(new TaskTuple(ImportCommitWork.class, ImportCommitTask.class)); - taskvec.add(new TaskTuple(IndexMetadataChangeWork.class, - IndexMetadataChangeTask.class)); taskvec.add(new TaskTuple(TezWork.class, TezTask.class)); taskvec.add(new TaskTuple(SparkWork.class, SparkTask.class)); taskvec.add(new TaskTuple<>(ReplDumpWork.class, ReplDumpTask.class)); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 8f44c94c7c..82484429cf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -3567,11 +3567,6 @@ public static void setInputAttributes(Configuration conf, MapWork mWork) { if (mWork.getInputformat() != null) { HiveConf.setVar(conf, var, mWork.getInputformat()); } - if (mWork.getIndexIntermediateFile() != null) { - conf.set(ConfVars.HIVE_INDEX_COMPACT_FILE.varname, mWork.getIndexIntermediateFile()); - conf.set(ConfVars.HIVE_INDEX_BLOCKFILTER_FILE.varname, mWork.getIndexIntermediateFile()); - } - // Intentionally overwrites anything the user may have put here conf.setBoolean("hive.input.format.sorted", mWork.isInputFormatSorted()); } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java index b436e80ae3..e7fe4a2a2f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java +++ 
ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java @@ -574,11 +574,6 @@ protected void setInputAttributes(Configuration conf) { if (mWork.getInputformat() != null) { HiveConf.setVar(conf, ConfVars.HIVEINPUTFORMAT, mWork.getInputformat()); } - if (mWork.getIndexIntermediateFile() != null) { - conf.set(ConfVars.HIVE_INDEX_COMPACT_FILE.varname, mWork.getIndexIntermediateFile()); - conf.set(ConfVars.HIVE_INDEX_BLOCKFILTER_FILE.varname, mWork.getIndexIntermediateFile()); - } - // Intentionally overwrites anything the user may have put here conf.setBoolean("hive.input.format.sorted", mWork.isInputFormatSorted()); diff --git ql/src/java/org/apache/hadoop/hive/ql/index/AbstractIndexHandler.java ql/src/java/org/apache/hadoop/hive/ql/index/AbstractIndexHandler.java deleted file mode 100644 index 3424600472..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/AbstractIndexHandler.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.index; - -import java.util.List; - -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.ql.metadata.HiveUtils; -import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; - -/** - * Abstract base class for index handlers. This is provided as insulation - * so that as HiveIndexHandler evolves, default implementations of new - * methods can be added here in order to avoid breaking existing - * plugin implementations. - */ -public abstract class AbstractIndexHandler implements HiveIndexHandler { - - public static String getColumnNames(List fieldSchemas) { - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < fieldSchemas.size(); i++) { - if (i > 0) { - sb.append(","); - } - sb.append(HiveUtils.unparseIdentifier(fieldSchemas.get(i).getName())); - } - return sb.toString(); - } - - public void generateIndexQuery(Index index, ExprNodeDesc predicate, - ParseContext pctx, HiveIndexQueryContext queryContext) { - queryContext.setQueryTasks(null); - return; - } - - public boolean checkQuerySize(long inputSize, HiveConf conf) { - return false; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java deleted file mode 100644 index fb770967bc..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.index; - -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.hooks.ReadEntity; -import org.apache.hadoop.hive.ql.hooks.WriteEntity; -import org.apache.hadoop.hive.ql.index.compact.CompactIndexHandler; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.HiveUtils; -import org.apache.hadoop.hive.ql.metadata.VirtualColumn; -import org.apache.hadoop.hive.ql.optimizer.IndexUtils; -import org.apache.hadoop.hive.ql.plan.PartitionDesc; -import org.apache.hadoop.hive.ql.session.LineageState; - -/** - * Index handler for indexes that have aggregate functions on indexed columns. - * - */ -public class AggregateIndexHandler extends CompactIndexHandler { - - @Override - public void analyzeIndexDefinition(Table baseTable, Index index, - Table indexTable) throws HiveException { - StorageDescriptor storageDesc = index.getSd(); - if (this.usesIndexTable() && indexTable != null) { - StorageDescriptor indexTableSd = storageDesc.deepCopy(); - List indexTblCols = indexTableSd.getCols(); - FieldSchema bucketFileName = new FieldSchema("_bucketname", "string", ""); - indexTblCols.add(bucketFileName); - FieldSchema offSets = new FieldSchema("_offsets", "array", ""); - indexTblCols.add(offSets); - Map paraList = index.getParameters(); - - if(paraList != null && paraList.containsKey("AGGREGATES")){ - String propValue = paraList.get("AGGREGATES"); - if(propValue.contains(",")){ - String[] aggFuncs = propValue.split(","); - for (int i = 0; i < aggFuncs.length; i++) { - createAggregationFunction(indexTblCols, aggFuncs[i]); - } - }else{ - createAggregationFunction(indexTblCols, propValue); - } - } - indexTable.setSd(indexTableSd); - } - } - - private void createAggregationFunction(List indexTblCols, String property){ - String[] aggFuncCol = property.split("\\("); - String funcName = aggFuncCol[0]; - String colName = aggFuncCol[1].substring(0, aggFuncCol[1].length() - 1); - if(colName.contains("*")){ - colName = colName.replace("*", "all"); - } - FieldSchema aggregationFunction = - new FieldSchema("_" + funcName + "_of_" + colName + "", "bigint", ""); - indexTblCols.add(aggregationFunction); - } - - @Override - protected Task getIndexBuilderMapRedTask(Set inputs, - Set outputs, - Index index, boolean partitioned, - PartitionDesc indexTblPartDesc, String indexTableName, - PartitionDesc baseTablePartDesc, String baseTableName, String dbName, - 
LineageState lineageState) { - - List indexField = index.getSd().getCols(); - String indexCols = HiveUtils.getUnparsedColumnNamesFromFieldSchema(indexField); - - //form a new insert overwrite query. - StringBuilder command= new StringBuilder(); - Map partSpec = indexTblPartDesc.getPartSpec(); - - command.append("INSERT OVERWRITE TABLE " + HiveUtils.unparseIdentifier(indexTableName)); - if (partitioned && indexTblPartDesc != null) { - command.append(" PARTITION ( "); - List ret = getPartKVPairStringArray((LinkedHashMap) partSpec); - for (int i = 0; i < ret.size(); i++) { - String partKV = ret.get(i); - command.append(partKV); - if (i < ret.size() - 1) { - command.append(","); - } - } - command.append(" ) "); - } - - command.append(" SELECT "); - command.append(indexCols); - command.append(","); - - command.append(VirtualColumn.FILENAME.getName()); - command.append(","); - command.append(" collect_set ("); - command.append(VirtualColumn.BLOCKOFFSET.getName()); - command.append(") "); - command.append(","); - - assert indexField.size()==1; - - Map paraList = index.getParameters(); - if(paraList != null && paraList.containsKey("AGGREGATES")){ - command.append(paraList.get("AGGREGATES") + " "); - } - - command.append(" FROM " + HiveUtils.unparseIdentifier(baseTableName)); - Map basePartSpec = baseTablePartDesc.getPartSpec(); - if(basePartSpec != null) { - command.append(" WHERE "); - List pkv = getPartKVPairStringArray((LinkedHashMap) basePartSpec); - for (int i = 0; i < pkv.size(); i++) { - String partKV = pkv.get(i); - command.append(partKV); - if (i < pkv.size() - 1) { - command.append(" AND "); - } - } - } - command.append(" GROUP BY "); - command.append(indexCols + ", " + VirtualColumn.FILENAME.getName()); - - HiveConf builderConf = new HiveConf(getConf(), AggregateIndexHandler.class); - builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGEMAPFILES, false); - builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGEMAPREDFILES, false); - builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGETEZFILES, false); - Task rootTask = IndexUtils.createRootTask(builderConf, inputs, outputs, - command, (LinkedHashMap) partSpec, indexTableName, dbName, lineageState); - return rootTask; - } - } diff --git ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java deleted file mode 100644 index 30ae484c05..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hive.ql.index; - -import org.apache.hadoop.hive.ql.index.bitmap.BitmapIndexHandler; -import org.apache.hadoop.hive.ql.index.compact.CompactIndexHandler; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Holds index related constants - */ -public class HiveIndex { - public static final Logger l4j = LoggerFactory.getLogger("HiveIndex"); - public static final String INDEX_TABLE_CREATETIME = "hive.index.basetbl.dfs.lastModifiedTime"; - - public static enum IndexType { - AGGREGATE_TABLE("aggregate", AggregateIndexHandler.class.getName()), - COMPACT_SUMMARY_TABLE("compact", CompactIndexHandler.class.getName()), - BITMAP_TABLE("bitmap", BitmapIndexHandler.class.getName()); - - private IndexType(String indexType, String className) { - indexTypeName = indexType; - this.handlerClsName = className; - } - - private final String indexTypeName; - private final String handlerClsName; - - public String getName() { - return indexTypeName; - } - - public String getHandlerClsName() { - return handlerClsName; - } - } - - public static IndexType getIndexType(String name) { - IndexType[] types = IndexType.values(); - for (IndexType type : types) { - if(type.getName().equals(name.toLowerCase())) { - return type; - } - } - return null; - } - - public static IndexType getIndexTypeByClassName(String className) { - IndexType[] types = IndexType.values(); - for (IndexType type : types) { - if(type.getHandlerClsName().equals(className)) { - return type; - } - } - return null; - } - -} - diff --git ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java deleted file mode 100644 index 8facd9121a..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.index; - -import java.util.List; -import java.util.Set; - -import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.hooks.ReadEntity; -import org.apache.hadoop.hive.ql.hooks.WriteEntity; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.Partition; -import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; -import org.apache.hadoop.hive.ql.session.LineageState; - -/** - * HiveIndexHandler defines a pluggable interface for adding new index handlers - * to Hive. 
- */ -public interface HiveIndexHandler extends Configurable { - /** - * Determines whether this handler implements indexes by creating an index - * table. - * - * @return true if index creation implies creation of an index table in Hive; - * false if the index representation is not stored in a Hive table - */ - boolean usesIndexTable(); - - /** - * Requests that the handler validate an index definition and fill in - * additional information about its stored representation. - * - * @param baseTable - * the definition of the table being indexed - * - * @param index - * the definition of the index being created - * - * @param indexTable - * a partial definition of the index table to be used for storing the - * index representation, or null if usesIndexTable() returns false; - * the handler can augment the index's storage descriptor (e.g. with - * information about input/output format) and/or the index table's - * definition (typically with additional columns containing the index - * representation, e.g. pointers into HDFS). - * - * @throws HiveException if the index definition is invalid with respect to - * either the base table or the supplied index table definition - */ - void analyzeIndexDefinition( - org.apache.hadoop.hive.metastore.api.Table baseTable, - org.apache.hadoop.hive.metastore.api.Index index, - org.apache.hadoop.hive.metastore.api.Table indexTable) - throws HiveException; - - /** - * Requests that the handler generate a plan for building the index; the plan - * should read the base table and write out the index representation. - * - * @param baseTbl - * the definition of the table being indexed - * - * @param index - * the definition of the index - * - * @param baseTblPartitions - * list of base table partitions with each element mirrors to the - * corresponding one in indexTblPartitions - * - * @param indexTbl - * the definition of the index table, or null if usesIndexTable() - * returns null - * - * @param inputs - * inputs for hooks, supplemental outputs going - * along with the return value - * - * @param outputs - * outputs for hooks, supplemental outputs going - * along with the return value - * - * @param lineageState - * tracks Lineage for the query - * - * @return list of tasks to be executed in parallel for building the index - * - * @throws HiveException if plan generation fails - */ - List> generateIndexBuildTaskList( - org.apache.hadoop.hive.ql.metadata.Table baseTbl, - org.apache.hadoop.hive.metastore.api.Index index, - List indexTblPartitions, List baseTblPartitions, - org.apache.hadoop.hive.ql.metadata.Table indexTbl, - Set inputs, Set outputs, LineageState lineageState) - throws HiveException; - - /** - * Generate the list of tasks required to run an index optimized sub-query for the - * given predicate, using the given indexes. If multiple indexes are - * provided, it is up to the handler whether to use none, one, some or all of - * them. The supplied predicate may reference any of the columns from any of - * the indexes. If the handler decides to use more than one index, it is - * responsible for generating tasks to combine their search results - * (e.g. performing a JOIN on the result). 
- * @param indexes - * @param predicate - * @param pctx - * @param queryContext contains results, such as query tasks and input configuration - */ - void generateIndexQuery(List indexes, ExprNodeDesc predicate, - ParseContext pctx, HiveIndexQueryContext queryContext); - - /** - * Check the size of an input query to make sure it fits within the bounds - * - * @param inputSize size (in bytes) of the query in question - * @param conf - * @return true if query is within the bounds - */ - boolean checkQuerySize(long inputSize, HiveConf conf); -} diff --git ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexQueryContext.java ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexQueryContext.java deleted file mode 100644 index b7365413fa..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexQueryContext.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.ql.index; - -import java.io.Serializable; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Set; - -import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.hooks.ReadEntity; -import org.apache.hadoop.hive.ql.metadata.Partition; -import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; - -/** - * Used to pass information between the IndexProcessor and the plugin - * IndexHandler during query processing - * - */ -public class HiveIndexQueryContext { - - private Set additionalSemanticInputs; // additional inputs to add to the parse context when - // merging the index query tasks - private String indexInputFormat; // input format to set on the TableScanOperator to activate indexing - private String indexIntermediateFile; // name of intermediate file written by the index query for the - // TableScanOperator to use - private List> queryTasks; // list of tasks that will execute the index query and write - // results to a temporary file - private ExprNodeDesc residualPredicate; // predicate that could not be processed by an index handler - // and should be used on the base table scan (see HIVE-2115) - private Set queryPartitions; // partitions accessed by the original query - - public HiveIndexQueryContext() { - this.additionalSemanticInputs = null; - this.indexInputFormat = null; - this.indexIntermediateFile = null; - this.queryTasks = null; - } - - public Set getAdditionalSemanticInputs() { - return additionalSemanticInputs; - } - public void addAdditionalSemanticInputs(Set additionalParseInputs) { - if (this.additionalSemanticInputs == null) { - this.additionalSemanticInputs = new LinkedHashSet(); - } - this.additionalSemanticInputs.addAll(additionalParseInputs); - } - - public String getIndexInputFormat() { - return indexInputFormat; - } - public void 
setIndexInputFormat(String indexInputFormat) { - this.indexInputFormat = indexInputFormat; - } - - public String getIndexIntermediateFile() { - return indexIntermediateFile; - } - public void setIndexIntermediateFile(String indexIntermediateFile) { - this.indexIntermediateFile = indexIntermediateFile; - } - - public List> getQueryTasks() { - return queryTasks; - } - public void setQueryTasks(List> indexQueryTasks) { - this.queryTasks = indexQueryTasks; - } - - public void setResidualPredicate(ExprNodeDesc residualPredicate) { - this.residualPredicate = residualPredicate; - } - - public ExprNodeDesc getResidualPredicate() { - return residualPredicate; - } - - public Set getQueryPartitions() { - return queryPartitions; - } - - public void setQueryPartitions(Set queryPartitions) { - this.queryPartitions = queryPartitions; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java deleted file mode 100644 index 66970662a3..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.ql.index; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.SortedSet; -import java.util.TreeSet; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; -import org.apache.hadoop.hive.serde2.lazy.LazySerDeParameters; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapred.FileSplit; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.LineRecordReader.LineReader; - -/** - * HiveIndexResult parses the input stream from an index query - * to generate a list of file splits to query. 
- */ -public class HiveIndexResult implements IndexResult { - - public static final Logger l4j = - LoggerFactory.getLogger(HiveIndexResult.class.getSimpleName()); - - // IndexBucket - static class IBucket { - private String name = null; - private final SortedSet offsets = new TreeSet(); - - public IBucket(String n) { - name = n; - } - - public void add(Long offset) { - offsets.add(offset); - } - - public String getName() { - return name; - } - - public SortedSet getOffsets() { - return offsets; - } - - @Override - public boolean equals(Object obj) { - if (obj.getClass() != this.getClass()) { - return false; - } - return (((IBucket) obj).name.compareToIgnoreCase(this.name) == 0); - } - } - - JobConf job = null; - BytesRefWritable[] bytesRef = new BytesRefWritable[2]; - boolean ignoreHdfsLoc = false; - - public HiveIndexResult(List indexFiles, JobConf conf) throws IOException, - HiveException { - job = conf; - - bytesRef[0] = new BytesRefWritable(); - bytesRef[1] = new BytesRefWritable(); - ignoreHdfsLoc = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_INDEX_IGNORE_HDFS_LOC); - - if (indexFiles != null && indexFiles.size() > 0) { - List paths = new ArrayList(); - for (String indexFile : indexFiles) { - Path indexFilePath = new Path(indexFile); - FileSystem fs = indexFilePath.getFileSystem(conf); - FileStatus indexStat = fs.getFileStatus(indexFilePath); - if (indexStat.isDir()) { - FileStatus[] fss = fs.listStatus(indexFilePath, FileUtils.HIDDEN_FILES_PATH_FILTER); - for (FileStatus f : fss) { - paths.add(f.getPath()); - } - } else { - paths.add(indexFilePath); - } - } - - long maxEntriesToLoad = HiveConf.getLongVar(conf, HiveConf.ConfVars.HIVE_INDEX_COMPACT_QUERY_MAX_ENTRIES); - if (maxEntriesToLoad < 0) { - maxEntriesToLoad=Long.MAX_VALUE; - } - - long lineCounter = 0; - for (Path indexFinalPath : paths) { - FileSystem fs = indexFinalPath.getFileSystem(conf); - FSDataInputStream ifile = fs.open(indexFinalPath); - LineReader lr = new LineReader(ifile, conf); - try { - Text line = new Text(); - while (lr.readLine(line) > 0) { - if (++lineCounter > maxEntriesToLoad) { - throw new HiveException("Number of compact index entries loaded during the query exceeded the maximum of " + maxEntriesToLoad - + " set in " + HiveConf.ConfVars.HIVE_INDEX_COMPACT_QUERY_MAX_ENTRIES.varname); - } - add(line); - } - } - finally { - // this will close the input stream - lr.close(); - } - } - } - } - - Map buckets = new HashMap(); - - private void add(Text line) throws HiveException { - String l = line.toString(); - byte[] bytes = l.getBytes(); - int firstEnd = 0; - int i = 0; - for (int index = 0; index < bytes.length; index++) { - if (bytes[index] == LazySerDeParameters.DefaultSeparators[0]) { - i++; - firstEnd = index; - } - } - if (i > 1) { - throw new HiveException( - "Bad index file row (index file should only contain two columns: bucket_file_name and offset lists.) ." 
- + line.toString()); - } - String bucketFileName = new String(bytes, 0, firstEnd); - - if (ignoreHdfsLoc) { - Path tmpPath = new Path(bucketFileName); - bucketFileName = tmpPath.toUri().getPath(); - } - IBucket bucket = buckets.get(bucketFileName); - if (bucket == null) { - bucket = new IBucket(bucketFileName); - buckets.put(bucketFileName, bucket); - } - - int currentStart = firstEnd + 1; - int currentEnd = firstEnd + 1; - for (; currentEnd < bytes.length; currentEnd++) { - if (bytes[currentEnd] == LazySerDeParameters.DefaultSeparators[1]) { - String one_offset = new String(bytes, currentStart, currentEnd - - currentStart); - Long offset = Long.parseLong(one_offset); - bucket.getOffsets().add(offset); - currentStart = currentEnd + 1; - } - } - String one_offset = new String(bytes, currentStart, currentEnd - - currentStart); - bucket.getOffsets().add(Long.parseLong(one_offset)); - } - - @Override - public boolean contains(FileSplit split) throws HiveException { - - if (buckets == null) { - return false; - } - String bucketName = split.getPath().toString(); - IBucket bucket = buckets.get(bucketName); - if (bucket == null) { - bucketName = split.getPath().toUri().getPath(); - bucket = buckets.get(bucketName); - if (bucket == null) { - return false; - } - } - - for (Long offset : bucket.getOffsets()) { - if ((offset >= split.getStart()) - && (offset <= split.getStart() + split.getLength())) { - return true; - } - } - return false; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexedInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexedInputFormat.java deleted file mode 100644 index b5bddecaa5..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexedInputFormat.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
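
For context on the feature being removed: the deleted HiveIndexResult above keeps, per bucket file, a sorted set of byte offsets parsed from the compact index output, and its contains() reports whether a split overlaps any indexed offset. A minimal, self-contained sketch of that lookup follows; it is illustrative only and not part of this patch, and the class/method names are hypothetical stand-ins for the Hive types.

import java.util.HashMap;
import java.util.Map;
import java.util.NavigableSet;
import java.util.TreeSet;

// Simplified stand-in for the bucket-name -> sorted-offsets map built by the
// deleted HiveIndexResult; contains() mirrors its split-overlap test.
class CompactIndexLookup {
    private final Map<String, TreeSet<Long>> buckets = new HashMap<>();

    // One compact index row contributes a bucket file name plus an indexed offset.
    void add(String bucketFileName, long offset) {
        buckets.computeIfAbsent(bucketFileName, k -> new TreeSet<>()).add(offset);
    }

    // True if any indexed offset falls inside [start, start + length] of the split.
    boolean contains(String bucketFileName, long start, long length) {
        TreeSet<Long> offsets = buckets.get(bucketFileName);
        if (offsets == null) {
            return false;
        }
        NavigableSet<Long> inRange = offsets.subSet(start, true, start + length, true);
        return !inRange.isEmpty();
    }

    public static void main(String[] args) {
        CompactIndexLookup lookup = new CompactIndexLookup();
        lookup.add("/warehouse/t/bucket_0", 128L);
        lookup.add("/warehouse/t/bucket_0", 4096L);
        System.out.println(lookup.contains("/warehouse/t/bucket_0", 0L, 1024L));    // true: offset 128 is in range
        System.out.println(lookup.contains("/warehouse/t/bucket_0", 8192L, 1024L)); // false: no indexed offset in range
    }
}
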
- */ - -package org.apache.hadoop.hive.ql.index; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Iterator; -import java.util.Set; -import java.util.Arrays; -import java.util.Collection; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; -import org.apache.hadoop.hive.ql.io.HiveInputFormat; -import org.apache.hadoop.hive.ql.io.IOPrepareCache; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.PartitionDesc; -import org.apache.hadoop.mapred.FileInputFormat; -import org.apache.hadoop.mapred.FileSplit; -import org.apache.hadoop.mapred.InputFormat; -import org.apache.hadoop.mapred.InputSplit; -import org.apache.hadoop.mapred.JobConf; - -/** - * Input format for doing queries that use indexes. - * Uses a blockfilter file to specify the blocks to query. - */ -public class HiveIndexedInputFormat extends HiveInputFormat { - public static final Logger l4j = LoggerFactory.getLogger("HiveIndexInputFormat"); - private final String indexFile; - - public HiveIndexedInputFormat() { - super(); - indexFile = "hive.index.blockfilter.file"; - } - - public HiveIndexedInputFormat(String indexFileName) { - indexFile = indexFileName; - } - - public InputSplit[] doGetSplits(JobConf job, int numSplits) throws IOException { - - super.init(job); - - Path[] dirs = FileInputFormat.getInputPaths(job); - if (dirs.length == 0) { - throw new IOException("No input paths specified in job"); - } - JobConf newjob = new JobConf(job); - ArrayList result = new ArrayList(); - - // for each dir, get the InputFormat, and do getSplits. 
- PartitionDesc part; - for (Path dir : dirs) { - part = HiveFileFormatUtils - .getFromPathRecursively(pathToPartitionInfo, dir, - IOPrepareCache.get().allocatePartitionDescMap(), true); - // create a new InputFormat instance if this is the first time to see this - // class - Class inputFormatClass = part.getInputFileFormatClass(); - InputFormat inputFormat = getInputFormatFromCache(inputFormatClass, job); - - try { - Utilities.copyTableJobPropertiesToConf(part.getTableDesc(), newjob); - } catch (HiveException e) { - throw new IOException(e); - } - - FileInputFormat.setInputPaths(newjob, dir); - newjob.setInputFormat(inputFormat.getClass()); - InputSplit[] iss = inputFormat.getSplits(newjob, numSplits / dirs.length); - for (InputSplit is : iss) { - result.add(new HiveInputSplit(is, inputFormatClass.getName())); - } - } - return result.toArray(new HiveInputSplit[result.size()]); - } - - public static List getIndexFiles(String indexFileStr) { - // tokenize and store string of form (path,)+ - if (indexFileStr == null) { - return null; - } - String[] chunks = indexFileStr.split(","); - return Arrays.asList(chunks); - } - - @Override - public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { - String indexFileStr = job.get(indexFile); - l4j.info("index_file is " + indexFileStr); - List indexFiles = getIndexFiles(indexFileStr); - - HiveIndexResult hiveIndexResult = null; - if (indexFiles != null) { - boolean first = true; - StringBuilder newInputPaths = new StringBuilder(); - try { - hiveIndexResult = new HiveIndexResult(indexFiles, job); - } catch (HiveException e) { - l4j.error("Unable to read index.."); - throw new IOException(e); - } - - Set inputFiles = hiveIndexResult.buckets.keySet(); - if (inputFiles == null || inputFiles.size() <= 0) { - // return empty splits if index results were empty - return new InputSplit[0]; - } - Iterator iter = inputFiles.iterator(); - while(iter.hasNext()) { - String path = iter.next(); - if (path.trim().equalsIgnoreCase("")) { - continue; - } - if (!first) { - newInputPaths.append(","); - } else { - first = false; - } - newInputPaths.append(path); - } - FileInputFormat.setInputPaths(job, newInputPaths.toString()); - } else { - return super.getSplits(job, numSplits); - } - - HiveInputSplit[] splits = (HiveInputSplit[]) this.doGetSplits(job, numSplits); - - long maxInputSize = HiveConf.getLongVar(job, ConfVars.HIVE_INDEX_COMPACT_QUERY_MAX_SIZE); - if (maxInputSize < 0) { - maxInputSize=Long.MAX_VALUE; - } - - SplitFilter filter = new SplitFilter(hiveIndexResult, maxInputSize); - Collection newSplits = filter.filter(splits); - - return newSplits.toArray(new FileSplit[newSplits.size()]); - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeTask.java ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeTask.java deleted file mode 100644 index 9e714e459e..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeTask.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
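
The deleted getSplits() above re-points the job's input to only the bucket files named by the index result before computing splits, joining them into a comma-separated path list. A small stand-alone sketch of that path-joining step, illustrative only and not part of the patch:

import java.util.LinkedHashSet;
import java.util.Set;

// Mirrors the loop in the deleted HiveIndexedInputFormat.getSplits() that turns the
// set of bucket files reported by the index result into a comma-separated input path list.
class IndexInputPaths {
    static String join(Set<String> bucketFiles) {
        StringBuilder newInputPaths = new StringBuilder();
        boolean first = true;
        for (String path : bucketFiles) {
            if (path.trim().isEmpty()) {
                continue; // skip blank entries, as the original loop does
            }
            if (!first) {
                newInputPaths.append(",");
            } else {
                first = false;
            }
            newInputPaths.append(path);
        }
        return newInputPaths.toString();
    }

    public static void main(String[] args) {
        Set<String> buckets = new LinkedHashSet<>();
        buckets.add("/warehouse/t/bucket_0");
        buckets.add("");
        buckets.add("/warehouse/t/bucket_3");
        System.out.println(join(buckets)); // /warehouse/t/bucket_0,/warehouse/t/bucket_3
    }
}
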
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.index; - -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.ql.DriverContext; -import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.metadata.Hive; -import org.apache.hadoop.hive.ql.metadata.Partition; -import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.plan.api.StageType; - -public class IndexMetadataChangeTask extends Task{ - - private static final long serialVersionUID = 1L; - - @Override - protected int execute(DriverContext driverContext) { - - try { - Hive db = Hive.get(conf); - IndexMetadataChangeWork work = this.getWork(); - String tblName = work.getIndexTbl(); - Table tbl = db.getTable(work.getDbName(), tblName); - if (tbl == null ) { - console.printError("Index table can not be null."); - return 1; - } - - if (!tbl.getTableType().equals(TableType.INDEX_TABLE)) { - console.printError("Table " + tbl.getTableName() + " not specified."); - return 1; - } - - if (tbl.isPartitioned() && work.getPartSpec() == null) { - console.printError("Index table is partitioned, but no partition specified."); - return 1; - } - - if (work.getPartSpec() != null) { - Partition part = db.getPartition(tbl, work.getPartSpec(), false); - if (part == null) { - console.printError("Partition " + - Warehouse.makePartName(work.getPartSpec(), false).toString() - + " does not exist."); - return 1; - } - - Path path = part.getDataLocation(); - FileSystem fs = path.getFileSystem(conf); - FileStatus fstat = fs.getFileStatus(path); - - part.getParameters().put(HiveIndex.INDEX_TABLE_CREATETIME, Long.toString(fstat.getModificationTime())); - db.alterPartition(tbl.getTableName(), part, null); - } else { - Path url = new Path(tbl.getPath().toString()); - FileSystem fs = url.getFileSystem(conf); - FileStatus fstat = fs.getFileStatus(url); - tbl.getParameters().put(HiveIndex.INDEX_TABLE_CREATETIME, Long.toString(fstat.getModificationTime())); - db.alterTable(tbl, null); - } - } catch (Exception e) { - e.printStackTrace(); - console.printError("Error changing index table/partition metadata " - + e.getMessage()); - return 1; - } - return 0; - } - - @Override - public String getName() { - return IndexMetadataChangeTask.class.getSimpleName(); - } - - @Override - public StageType getType() { - return StageType.DDL; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeWork.java ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeWork.java deleted file mode 100644 index 6d77ea4b98..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeWork.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.index; - -import java.io.Serializable; -import java.util.HashMap; - -public class IndexMetadataChangeWork implements Serializable { - - private static final long serialVersionUID = 1L; - - private HashMap partSpec; - private String indexTbl; - private String dbName; - - public IndexMetadataChangeWork() { - } - - public IndexMetadataChangeWork(HashMap partSpec, - String indexTbl, String dbName) { - super(); - this.partSpec = partSpec; - this.indexTbl = indexTbl; - this.dbName = dbName; - } - - public HashMap getPartSpec() { - return partSpec; - } - - public void setPartSpec(HashMap partSpec) { - this.partSpec = partSpec; - } - - public String getIndexTbl() { - return indexTbl; - } - - public void setIndexTbl(String indexTbl) { - this.indexTbl = indexTbl; - } - - public String getDbName() { - return dbName; - } - - public void setDbName(String dbName) { - this.dbName = dbName; - } - -} diff --git ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java index 747603642e..6a3f3b4ec9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java @@ -61,7 +61,10 @@ * Currently, it only supports pure conjunctions over binary expressions * comparing a column reference with a constant value. It is assumed * that all column aliases encountered refer to the same table. + * + * @deprecated kept only because some storagehandlers are using it internally */ +@Deprecated public class IndexPredicateAnalyzer { private final Set udfNames; diff --git ql/src/java/org/apache/hadoop/hive/ql/index/IndexResult.java ql/src/java/org/apache/hadoop/hive/ql/index/IndexResult.java deleted file mode 100644 index e8f2daff03..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/IndexResult.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hive.ql.index; - -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.mapred.FileSplit; - -public interface IndexResult { - boolean contains(FileSplit split) throws HiveException; -} diff --git ql/src/java/org/apache/hadoop/hive/ql/index/IndexSearchCondition.java ql/src/java/org/apache/hadoop/hive/ql/index/IndexSearchCondition.java index 15cb1f7b09..3985246aa3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/index/IndexSearchCondition.java +++ ql/src/java/org/apache/hadoop/hive/ql/index/IndexSearchCondition.java @@ -25,7 +25,9 @@ * IndexSearchCondition represents an individual search condition * found by {@link IndexPredicateAnalyzer}. * + * @deprecated kept only because some storagehandlers are using it internally */ +@Deprecated public class IndexSearchCondition { private ExprNodeColumnDesc columnDesc; @@ -56,7 +58,7 @@ public IndexSearchCondition( * @param constantDesc constant value to search for * * @param indexExpr the comparison expression for the index - * + * * @param originalExpr the original comparison expression */ public IndexSearchCondition( diff --git ql/src/java/org/apache/hadoop/hive/ql/index/SplitFilter.java ql/src/java/org/apache/hadoop/hive/ql/index/SplitFilter.java deleted file mode 100644 index c51dec60f7..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/SplitFilter.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hive.ql.index; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Iterables; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Comparator; -import java.util.List; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.io.HiveInputFormat.HiveInputSplit; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.io.SequenceFile; -import org.apache.hadoop.mapred.FileSplit; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public final class SplitFilter { - public static final Logger LOG = LoggerFactory.getLogger(SplitFilter.class); - - private final IndexResult indexResult; - private final long maxInputSize; - - public SplitFilter(IndexResult indexResult, long maxInputSize) { - this.indexResult = indexResult; - this.maxInputSize = maxInputSize; - } - - public List filter(HiveInputSplit[] splits) throws IOException { - long sumSplitLengths = 0; - List newSplits = new ArrayList<>(); - - Arrays.sort(splits, new HiveInputSplitComparator()); - - for (HiveInputSplit split : splits) { - LOG.info("split start : " + split.getStart()); - LOG.info("split end : " + (split.getStart() + split.getLength())); - - try { - if (indexResult.contains(split)) { - HiveInputSplit newSplit = split; - if (isAdjustmentRequired(newSplits, split)) { - newSplit = adjustSplit(split); - } - sumSplitLengths += newSplit.getLength(); - if (sumSplitLengths > maxInputSize) { - String messageTemplate = "Size of data to read during a compact-index-based query " + - "exceeded the maximum of %d set in %s"; - throw new IOException(String.format(messageTemplate, maxInputSize, - HiveConf.ConfVars.HIVE_INDEX_COMPACT_QUERY_MAX_SIZE.varname)); - } - newSplits.add(newSplit); - } - } catch (HiveException e) { - throw new RuntimeException("Unable to get metadata for input table split " + - split.getPath(), e); - } - } - LOG.info("Number of input splits: {}, new input splits: {}, sum of split lengths: {}", - splits.length, newSplits.size(), sumSplitLengths); - return newSplits; - } - - private boolean isAdjustmentRequired(List newSplits, HiveInputSplit split) { - return (split.inputFormatClassName().contains("RCFile") || - split.inputFormatClassName().contains("SequenceFile")) && split.getStart() > 0 && - !doesOverlap(newSplits, split.getPath(), adjustStart(split.getStart())); - } - - private boolean doesOverlap(List newSplits, Path path, long start) { - if (newSplits.isEmpty()) { - return false; - } - HiveInputSplit lastSplit = Iterables.getLast(newSplits); - if (lastSplit.getPath().equals(path)) { - return lastSplit.getStart() + lastSplit.getLength() > start; - } - return false; - } - - private long adjustStart(long start) { - return start > SequenceFile.SYNC_INTERVAL ? 
start - SequenceFile.SYNC_INTERVAL : 0; - } - - private HiveInputSplit adjustSplit(HiveInputSplit split) throws IOException { - long adjustedStart = adjustStart(split.getStart()); - return new HiveInputSplit(new FileSplit(split.getPath(), adjustedStart, - split.getStart() - adjustedStart + split.getLength(), split.getLocations()), - split.inputFormatClassName()); - } - - @VisibleForTesting - static final class HiveInputSplitComparator implements Comparator { - @Override - public int compare(HiveInputSplit o1, HiveInputSplit o2) { - int pathCompare = comparePath(o1.getPath(), o2.getPath()); - if (pathCompare != 0) { - return pathCompare; - } - return Long.compare(o1.getStart(), o2.getStart()); - } - - private int comparePath(Path p1, Path p2) { - return p1.compareTo(p2); - } - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java deleted file mode 100644 index d8615221b2..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.index; - -import java.util.ArrayList; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Set; -import java.util.Map.Entry; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.hooks.ReadEntity; -import org.apache.hadoop.hive.ql.hooks.WriteEntity; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.HiveUtils; -import org.apache.hadoop.hive.ql.metadata.Partition; -import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.PartitionDesc; -import org.apache.hadoop.hive.ql.plan.TableDesc; -import org.apache.hadoop.hive.ql.session.LineageState; - -/** - * Index handler for indexes that use tables to store indexes. 
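
The deleted SplitFilter above keeps only the splits the index result marks as containing matches, rewinds the start of RCFile/SequenceFile splits by one sync interval so a record straddling the boundary is not lost, and fails once the accepted bytes exceed the configured maximum. A simplified, self-contained sketch of that filtering follows; it is illustrative only, the Split class is a stand-in for HiveInputSplit, the 2000-byte constant stands in for SequenceFile.SYNC_INTERVAL, and the original's split sorting, input-format check, and overlap check are omitted.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

// Simplified stand-in for the deleted SplitFilter: keep matching splits, rewind their
// start by one sync interval, and enforce a total-size cap.
class SplitFilterSketch {
    static final long SYNC_INTERVAL = 2000; // stand-in for SequenceFile.SYNC_INTERVAL

    static final class Split {
        final String path;
        final long start;
        final long length;
        Split(String path, long start, long length) {
            this.path = path;
            this.start = start;
            this.length = length;
        }
    }

    // indexMatches stands in for IndexResult.contains(split).
    static List<Split> filter(List<Split> splits, Predicate<Split> indexMatches, long maxInputSize)
            throws IOException {
        long sumSplitLengths = 0;
        List<Split> kept = new ArrayList<>();
        for (Split s : splits) {
            if (!indexMatches.test(s)) {
                continue;
            }
            // Rewind the start by one sync interval so a record beginning just before
            // the split boundary is still read (mirrors SplitFilter.adjustSplit()).
            long adjustedStart = s.start > SYNC_INTERVAL ? s.start - SYNC_INTERVAL : 0;
            Split adjusted = new Split(s.path, adjustedStart, s.start - adjustedStart + s.length);
            sumSplitLengths += adjusted.length;
            if (sumSplitLengths > maxInputSize) {
                throw new IOException("Size of data to read during a compact-index-based query "
                    + "exceeded the maximum of " + maxInputSize);
            }
            kept.add(adjusted);
        }
        return kept;
    }
}
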
- */ -public abstract class TableBasedIndexHandler extends AbstractIndexHandler { - protected Configuration configuration; - - @Override - public List> generateIndexBuildTaskList( - org.apache.hadoop.hive.ql.metadata.Table baseTbl, - org.apache.hadoop.hive.metastore.api.Index index, - List indexTblPartitions, List baseTblPartitions, - org.apache.hadoop.hive.ql.metadata.Table indexTbl, - Set inputs, Set outputs, - LineageState lineageState) throws HiveException { - try { - - TableDesc desc = Utilities.getTableDesc(indexTbl); - - List newBaseTblPartitions = new ArrayList(); - - List> indexBuilderTasks = new ArrayList>(); - - if (!baseTbl.isPartitioned()) { - // the table does not have any partition, then create index for the - // whole table - Task indexBuilder = getIndexBuilderMapRedTask(inputs, outputs, index, false, - new PartitionDesc(desc, null), indexTbl.getTableName(), - new PartitionDesc(Utilities.getTableDesc(baseTbl), null), - baseTbl.getTableName(), indexTbl.getDbName(), lineageState); - indexBuilderTasks.add(indexBuilder); - } else { - - // check whether the index table partitions are still exists in base - // table - for (int i = 0; i < indexTblPartitions.size(); i++) { - Partition indexPart = indexTblPartitions.get(i); - Partition basePart = null; - for (int j = 0; j < baseTblPartitions.size(); j++) { - if (baseTblPartitions.get(j).getName().equals(indexPart.getName())) { - basePart = baseTblPartitions.get(j); - newBaseTblPartitions.add(baseTblPartitions.get(j)); - break; - } - } - if (basePart == null) { - throw new RuntimeException( - "Partitions of base table and index table are inconsistent."); - } - // for each partition, spawn a map reduce task. - Task indexBuilder = getIndexBuilderMapRedTask(inputs, outputs, index, true, - new PartitionDesc(indexPart), indexTbl.getTableName(), - new PartitionDesc(basePart), baseTbl.getTableName(), indexTbl.getDbName(), - lineageState); - indexBuilderTasks.add(indexBuilder); - } - } - return indexBuilderTasks; - } catch (Exception e) { - throw new SemanticException(e); - } - } - - protected Task getIndexBuilderMapRedTask(Set inputs, Set outputs, - Index index, boolean partitioned, - PartitionDesc indexTblPartDesc, String indexTableName, - PartitionDesc baseTablePartDesc, String baseTableName, String dbName, - LineageState lineageState) throws HiveException { - return getIndexBuilderMapRedTask(inputs, outputs, index.getSd().getCols(), - partitioned, indexTblPartDesc, indexTableName, baseTablePartDesc, baseTableName, dbName, - lineageState); - } - - protected Task getIndexBuilderMapRedTask(Set inputs, Set outputs, - List indexField, boolean partitioned, - PartitionDesc indexTblPartDesc, String indexTableName, - PartitionDesc baseTablePartDesc, String baseTableName, String dbName, - LineageState lineageState) throws HiveException { - return null; - } - - protected List getPartKVPairStringArray( - LinkedHashMap partSpec) { - List ret = new ArrayList(partSpec.size()); - Iterator> iter = partSpec.entrySet().iterator(); - while (iter.hasNext()) { - StringBuilder sb = new StringBuilder(); - Entry p = iter.next(); - sb.append(HiveUtils.unparseIdentifier(p.getKey())); - sb.append(" = "); - sb.append("'"); - sb.append(HiveUtils.escapeString(p.getValue())); - sb.append("'"); - ret.add(sb.toString()); - } - return ret; - } - - @Override - public boolean usesIndexTable() { - return true; - } - - @Override - public Configuration getConf() { - return configuration; - } - - @Override - public void setConf(Configuration conf) { - this.configuration = conf; 
- } - -} diff --git ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java deleted file mode 100644 index 62db4db814..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java +++ /dev/null @@ -1,312 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.index.bitmap; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.ql.Driver; -import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.hooks.ReadEntity; -import org.apache.hadoop.hive.ql.hooks.WriteEntity; -import org.apache.hadoop.hive.ql.index.HiveIndexQueryContext; -import org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat; -import org.apache.hadoop.hive.ql.index.IndexPredicateAnalyzer; -import org.apache.hadoop.hive.ql.index.IndexSearchCondition; -import org.apache.hadoop.hive.ql.index.TableBasedIndexHandler; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.HiveUtils; -import org.apache.hadoop.hive.ql.metadata.Partition; -import org.apache.hadoop.hive.ql.metadata.VirtualColumn; -import org.apache.hadoop.hive.ql.optimizer.IndexUtils; -import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; -import org.apache.hadoop.hive.ql.plan.PartitionDesc; -import org.apache.hadoop.hive.ql.session.LineageState; -import org.apache.hadoop.hive.ql.stats.StatsUtils; -import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual; -import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan; -import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan; -import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan; -import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan; - -/** - * Index handler for the bitmap index. Bitmap index uses an EWAH-compressed - * bitmap to represent the values in a table. 
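
The helper getPartKVPairStringArray() above renders a partition spec as key = 'value' fragments, which the concrete handlers splice into the PARTITION (...) clause and WHERE clause of their index-build statements. A stand-alone sketch of that rendering, illustrative only; the real code additionally escapes identifiers and values via HiveUtils, which is omitted here.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Renders a partition spec as key = 'value' fragments, mirroring the deleted
// TableBasedIndexHandler.getPartKVPairStringArray(); identifier and string escaping
// is left out for brevity.
class PartSpecRenderer {
    static List<String> toKeyValuePairs(LinkedHashMap<String, String> partSpec) {
        List<String> ret = new ArrayList<>(partSpec.size());
        for (Map.Entry<String, String> p : partSpec.entrySet()) {
            ret.add(p.getKey() + " = '" + p.getValue() + "'");
        }
        return ret;
    }

    public static void main(String[] args) {
        LinkedHashMap<String, String> spec = new LinkedHashMap<>();
        spec.put("ds", "2018-01-01");
        spec.put("hr", "12");
        // Joined with "," inside PARTITION(...), or with " AND " in the WHERE clause
        // of the generated index-build query.
        System.out.println(String.join(", ", toKeyValuePairs(spec)));
    }
}
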
- */ -public class BitmapIndexHandler extends TableBasedIndexHandler { - - private Configuration configuration; - private static final Logger LOG = LoggerFactory.getLogger(BitmapIndexHandler.class.getName()); - - @Override - public void generateIndexQuery(List indexes, ExprNodeDesc predicate, - ParseContext pctx, HiveIndexQueryContext queryContext) { - - Map indexPredicates = decomposePredicate( - predicate, - indexes, - queryContext); - - if (indexPredicates == null) { - LOG.info("No decomposed predicate found"); - queryContext.setQueryTasks(null); - return; // abort if we couldn't pull out anything from the predicate - } - - List iqs = new ArrayList(indexes.size()); - int i = 0; - for (Index index : indexes) { - ExprNodeDesc indexPredicate = indexPredicates.get(index); - if (indexPredicate != null) { - iqs.add(new BitmapInnerQuery( - index.getIndexTableName(), - indexPredicate, - "ind" + i++)); - } - } - // setup TableScanOperator to change input format for original query - queryContext.setIndexInputFormat(HiveIndexedInputFormat.class.getName()); - - // Build reentrant QL for index query - StringBuilder qlCommand = new StringBuilder("INSERT OVERWRITE DIRECTORY "); - - String tmpFile = pctx.getContext().getMRTmpPath().toUri().toString(); - qlCommand.append( "\"" + tmpFile + "\" "); // QL includes " around file name - qlCommand.append("SELECT bucketname AS `_bucketname` , COLLECT_SET(offset) AS `_offsets` FROM "); - qlCommand.append("(SELECT `_bucketname` AS bucketname , `_offset` AS offset FROM "); - - - BitmapQuery head = iqs.get(0); - for ( i = 1; i < iqs.size(); i++) { - head = new BitmapOuterQuery("oind"+i, head, iqs.get(i)); - } - qlCommand.append(head.toString()); - qlCommand.append(" WHERE NOT EWAH_BITMAP_EMPTY(" + head.getAlias() + ".`_bitmaps`) ) tmp_index GROUP BY bucketname"); - - // generate tasks from index query string - LOG.info("Generating tasks for re-entrant QL query: " + qlCommand.toString()); - HiveConf queryConf = new HiveConf(pctx.getConf(), BitmapIndexHandler.class); - HiveConf.setBoolVar(queryConf, HiveConf.ConfVars.COMPRESSRESULT, false); - Driver driver = new Driver(queryConf, pctx.getQueryState().getLineageState()); - driver.compile(qlCommand.toString(), false); - - queryContext.setIndexIntermediateFile(tmpFile); - queryContext.addAdditionalSemanticInputs(driver.getPlan().getInputs()); - queryContext.setQueryTasks(driver.getPlan().getRootTasks()); - } - - /** - * Split the predicate into the piece we can deal with (pushed), and the one we can't (residual) - * @param predicate - * @param index - * @return - */ - private Map decomposePredicate(ExprNodeDesc predicate, List indexes, - HiveIndexQueryContext queryContext) { - - Map indexPredicates = new HashMap(); - // compute overall residual - IndexPredicateAnalyzer analyzer = getIndexPredicateAnalyzer(indexes, queryContext.getQueryPartitions()); - List searchConditions = new ArrayList(); - ExprNodeDesc residualPredicate = analyzer.analyzePredicate(predicate, searchConditions); - // pass residual predicate back out for further processing - queryContext.setResidualPredicate(residualPredicate); - - if (searchConditions.size() == 0) { - return null; - } - - for (Index index : indexes) { - ArrayList in = new ArrayList(1); - in.add(index); - analyzer = getIndexPredicateAnalyzer(in, queryContext.getQueryPartitions()); - searchConditions = new ArrayList(); - // split predicate into pushed (what we can handle), and residual (what we can't handle) - // pushed predicate from translateSearchConditions is stored for the current 
index - // This ensures that we apply all possible predicates to each index - analyzer.analyzePredicate(predicate, searchConditions); - if (searchConditions.size() == 0) { - indexPredicates.put(index, null); - } else { - indexPredicates.put(index, analyzer.translateSearchConditions(searchConditions)); - } - } - - return indexPredicates; - } - - /** - * Instantiate a new predicate analyzer suitable for determining - * whether we can use an index, based on rules for indexes in - * WHERE clauses that we support - * - * @return preconfigured predicate analyzer for WHERE queries - */ - private IndexPredicateAnalyzer getIndexPredicateAnalyzer(List indexes, Set queryPartitions) { - IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer(); - - analyzer.addComparisonOp(GenericUDFOPEqual.class.getName()); - analyzer.addComparisonOp(GenericUDFOPLessThan.class.getName()); - analyzer.addComparisonOp(GenericUDFOPEqualOrLessThan.class.getName()); - analyzer.addComparisonOp(GenericUDFOPGreaterThan.class.getName()); - analyzer.addComparisonOp(GenericUDFOPEqualOrGreaterThan.class.getName()); - - // only return results for columns in the list of indexes - for (Index index : indexes) { - List columnSchemas = index.getSd().getCols(); - for (FieldSchema column : columnSchemas) { - analyzer.allowColumnName(column.getName()); - } - } - - // partitioned columns are treated as if they have indexes so that the partitions - // are used during the index query generation - for (Partition part : queryPartitions) { - if (part.getSpec().isEmpty()) { - continue; // empty partitions are from whole tables, so we don't want to add them in - } - for (String column : part.getSpec().keySet()) { - analyzer.allowColumnName(column); - } - } - - return analyzer; - } - - @Override - public void analyzeIndexDefinition(Table baseTable, Index index, - Table indexTable) throws HiveException { - StorageDescriptor storageDesc = index.getSd(); - if (this.usesIndexTable() && indexTable != null) { - StorageDescriptor indexTableSd = storageDesc.deepCopy(); - List indexTblCols = indexTableSd.getCols(); - FieldSchema bucketFileName = new FieldSchema("_bucketname", "string", ""); - indexTblCols.add(bucketFileName); - FieldSchema offSets = new FieldSchema("_offset", "bigint", ""); - indexTblCols.add(offSets); - FieldSchema bitmaps = new FieldSchema("_bitmaps", "array", ""); - indexTblCols.add(bitmaps); - indexTable.setSd(indexTableSd); - } - } - - @Override - protected Task getIndexBuilderMapRedTask(Set inputs, Set outputs, - List indexField, boolean partitioned, - PartitionDesc indexTblPartDesc, String indexTableName, - PartitionDesc baseTablePartDesc, String baseTableName, String dbName, - LineageState lineageState) throws HiveException { - - HiveConf builderConf = new HiveConf(getConf(), BitmapIndexHandler.class); - HiveConf.setBoolVar(builderConf, HiveConf.ConfVars.HIVEROWOFFSET, true); - - String indexCols = HiveUtils.getUnparsedColumnNamesFromFieldSchema(indexField); - - //form a new insert overwrite query. 
- StringBuilder command= new StringBuilder(); - LinkedHashMap partSpec = indexTblPartDesc.getPartSpec(); - - String fullIndexTableName = StatsUtils.getFullyQualifiedTableName(HiveUtils.unparseIdentifier(dbName), - HiveUtils.unparseIdentifier(indexTableName)); - command.append("INSERT OVERWRITE TABLE " + fullIndexTableName); - if (partitioned && indexTblPartDesc != null) { - command.append(" PARTITION ( "); - List ret = getPartKVPairStringArray(partSpec); - for (int i = 0; i < ret.size(); i++) { - String partKV = ret.get(i); - command.append(partKV); - if (i < ret.size() - 1) { - command.append(","); - } - } - command.append(" ) "); - } - - String fullBaseTableName = StatsUtils.getFullyQualifiedTableName(HiveUtils.unparseIdentifier(dbName), - HiveUtils.unparseIdentifier(baseTableName)); - command.append(" SELECT "); - command.append(indexCols); - command.append(","); - command.append(VirtualColumn.FILENAME.getName()); - command.append(","); - command.append(VirtualColumn.BLOCKOFFSET.getName()); - command.append(","); - command.append("EWAH_BITMAP("); - command.append(VirtualColumn.ROWOFFSET.getName()); - command.append(")"); - command.append(" FROM " + fullBaseTableName); - LinkedHashMap basePartSpec = baseTablePartDesc.getPartSpec(); - if(basePartSpec != null) { - command.append(" WHERE "); - List pkv = getPartKVPairStringArray(basePartSpec); - for (int i = 0; i < pkv.size(); i++) { - String partKV = pkv.get(i); - command.append(partKV); - if (i < pkv.size() - 1) { - command.append(" AND "); - } - } - } - command.append(" GROUP BY "); - command.append(VirtualColumn.FILENAME.getName()); - command.append(","); - command.append(VirtualColumn.BLOCKOFFSET.getName()); - for (FieldSchema fieldSchema : indexField) { - command.append(","); - command.append(HiveUtils.unparseIdentifier(fieldSchema.getName())); - } - - // Require clusterby ROWOFFSET if map-size aggregation is off. - // TODO: Make this work without map side aggregation - if (!builderConf.get("hive.map.aggr", null).equals("true")) { - throw new HiveException("Cannot construct index without map-side aggregation"); - } - - Task rootTask = IndexUtils.createRootTask(builderConf, inputs, outputs, - command, partSpec, indexTableName, dbName, lineageState); - return rootTask; - } - - @Override - /** - * No lower bound on bitmap index query size, so this will always return true - */ - public boolean checkQuerySize(long querySize, HiveConf hiveConf) { - return true; - } - - @Override - public boolean usesIndexTable() { - return true; - } - -} diff --git ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapInnerQuery.java ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapInnerQuery.java deleted file mode 100644 index c7500a56b8..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapInnerQuery.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
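
Putting the pieces of getIndexBuilderMapRedTask() above together, the bitmap handler rebuilds an index by running an INSERT OVERWRITE that groups the base table by file name and block offset and aggregates the row offsets into an EWAH bitmap. A small sketch that assembles such a statement as a string; it is illustrative only, the table and column names are hypothetical, the PARTITION clause on the target is omitted, and no escaping is performed.

import java.util.List;

// Assembles the kind of index-build statement the deleted BitmapIndexHandler generated:
// one row per (file, block) with the matching row offsets collapsed into an EWAH bitmap.
class BitmapBuildQuery {
    static String build(String indexTable, String baseTable, List<String> indexedCols, String partFilter) {
        String cols = String.join(", ", indexedCols);
        StringBuilder sb = new StringBuilder();
        sb.append("INSERT OVERWRITE TABLE ").append(indexTable)
          .append(" SELECT ").append(cols)
          .append(", INPUT__FILE__NAME, BLOCK__OFFSET__INSIDE__FILE")
          .append(", EWAH_BITMAP(ROW__OFFSET__INSIDE__BLOCK)")
          .append(" FROM ").append(baseTable);
        if (partFilter != null) {
            sb.append(" WHERE ").append(partFilter);
        }
        sb.append(" GROUP BY INPUT__FILE__NAME, BLOCK__OFFSET__INSIDE__FILE, ").append(cols);
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(build("db.t_bitmap_idx", "db.t", List.of("`key`"), "ds = '2018-01-01'"));
    }
}
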
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.index.bitmap; - -import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; -import org.apache.hadoop.hive.ql.metadata.HiveUtils; -import org.apache.hadoop.hive.ql.index.bitmap.BitmapQuery; - -/** - * Representation of inner bitmap index SELECT query that scans bitmap index - * tables for a pushed predicate - */ -public class BitmapInnerQuery implements BitmapQuery { - private String tableName; - private ExprNodeDesc predicate; - private String alias; - private String queryStr; - - public BitmapInnerQuery(String tableName, ExprNodeDesc predicate, String alias) { - this.tableName = tableName; - this.predicate = predicate; - this.alias = alias; - constructQueryStr(); - } - - /** - * Return a string representation of the query string for compilation - */ - public String toString() { - return queryStr; - } - - /** - * Construct a string representation of the query to be compiled - */ - private void constructQueryStr() { - StringBuilder sb = new StringBuilder(); - sb.append("(SELECT * FROM "); - sb.append(HiveUtils.unparseIdentifier(tableName)); - sb.append(" WHERE "); - sb.append(predicate.getExprString()); - sb.append(") "); - sb.append(alias); - queryStr = sb.toString(); - } - - /** - * Return the assigned alias of the SELECT statement - */ - public String getAlias() { - return alias; - } - -} diff --git ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapObjectInput.java ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapObjectInput.java deleted file mode 100644 index 0f312a36b9..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapObjectInput.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.index.bitmap; - -import java.io.IOException; -import java.io.ObjectInput; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils; -import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; -import org.apache.hadoop.hive.serde2.lazy.LazyLong; - -/** - * An ObjectInput that allows for conversion from an List of LongWritable - * to an EWAH-compressed bitmap. 
- */ -public class BitmapObjectInput implements ObjectInput { - Iterator bufferIter; - List buffer; - - public BitmapObjectInput() { - buffer = new ArrayList(); - bufferIter = buffer.iterator(); - } - - public BitmapObjectInput(List l) { - readFromList(l); - } - - public void readFromList(List l) { - buffer = l; - bufferIter = buffer.iterator(); - } - - @Override - public int available() throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void close() throws IOException { - throw new UnsupportedOperationException(); - - } - - @Override - public int read() throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public int read(byte[] arg0) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public int read(byte[] arg0, int arg1, int arg2) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public Object readObject() throws ClassNotFoundException, IOException { - throw new UnsupportedOperationException(); - } - - @Override - public long skip(long arg0) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public boolean readBoolean() throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public byte readByte() throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public char readChar() throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public double readDouble() throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public float readFloat() throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void readFully(byte[] arg0) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void readFully(byte[] arg0, int arg1, int arg2) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public int readInt() throws IOException { - if (bufferIter.hasNext()) { - LongObjectInspector loi = PrimitiveObjectInspectorFactory.writableLongObjectInspector; - Long l = PrimitiveObjectInspectorUtils.getLong(bufferIter.next(), loi); - return l.intValue(); - //return bufferIter.next().intValue(); - } - else { - throw new IOException(); - } - } - - @Override - public String readLine() throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public long readLong() throws IOException { - //LongObjectInspector loi = PrimitiveObjectInspectorFactory.writableLongObjectInspector; - if (bufferIter.hasNext()) { - LongObjectInspector loi = PrimitiveObjectInspectorFactory.writableLongObjectInspector; - return PrimitiveObjectInspectorUtils.getLong(bufferIter.next(), loi); - //return bufferIter.next(); - } - else { - throw new IOException(); - } - } - - @Override - public short readShort() throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public String readUTF() throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public int readUnsignedByte() throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public int readUnsignedShort() throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public int skipBytes(int n) throws IOException { - throw new UnsupportedOperationException(); - } - - -} diff --git ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapObjectOutput.java 
ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapObjectOutput.java deleted file mode 100644 index e9d959d705..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapObjectOutput.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.index.bitmap; - -import java.io.IOException; -import java.io.ObjectOutput; -import java.util.ArrayList; -import java.util.List; -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils; -import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; - -/** - * An ObjectOutput that allows conversion from an EWAH-compressed bitmap - * to an List of LongWritable. - */ -public class BitmapObjectOutput implements ObjectOutput { - ArrayList buffer = new ArrayList(); - - public List list() { - return buffer; - } - - @Override - public void close() throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void flush() throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void write(int arg0) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void write(byte[] arg0) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void write(byte[] arg0, int arg1, int arg2) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void writeObject(Object arg0) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void writeBoolean(boolean arg0) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void writeByte(int arg0) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void writeBytes(String arg0) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void writeChar(int arg0) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void writeChars(String arg0) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void writeDouble(double v) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void writeFloat(float v) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void writeInt(int v) throws IOException { - buffer.add(new LongWritable(v)); - } - - @Override - public void writeLong(long v) throws IOException { - buffer.add(new LongWritable(v)); 
- } - - @Override - public void writeShort(int v) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void writeUTF(String s) throws IOException { - throw new UnsupportedOperationException(); - } - -} diff --git ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapOuterQuery.java ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapOuterQuery.java deleted file mode 100644 index 135b1ed7e9..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapOuterQuery.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.index.bitmap; - -import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; -import org.apache.hadoop.hive.ql.index.bitmap.BitmapQuery; - -/** - * Representation of the outer query on bitmap indexes that JOINs the result of - * inner SELECT scans on bitmap indexes (represented in BitmapQuery objects) - * using EWAH_* bitwise operations - */ -public class BitmapOuterQuery implements BitmapQuery { - private String alias; - private BitmapQuery lhs; - private BitmapQuery rhs; - private String queryStr; - - public BitmapOuterQuery(String alias, BitmapQuery lhs, BitmapQuery rhs) { - this.alias = alias; - this.lhs = lhs; - this.rhs = rhs; - constructQueryStr(); - } - - public String getAlias() { - return alias; - } - - /** - * Return a string representation of the query for compilation - */ - public String toString() { - return queryStr; - } - - /** - * Construct a string representation of the query to be compiled - */ - private void constructQueryStr() { - StringBuilder sb = new StringBuilder(); - sb.append("(SELECT "); - sb.append(lhs.getAlias()); - sb.append(".`_bucketname`, "); - sb.append(rhs.getAlias()); - sb.append(".`_offset`, "); - sb.append("EWAH_BITMAP_AND("); - sb.append(lhs.getAlias()); - sb.append(".`_bitmaps`, "); - sb.append(rhs.getAlias()); - sb.append(".`_bitmaps`) AS `_bitmaps` FROM "); - sb.append(lhs.toString()); - sb.append(" JOIN "); - sb.append(rhs.toString()); - sb.append(" ON "); - sb.append(lhs.getAlias()); - sb.append(".`_bucketname` = "); - sb.append(rhs.getAlias()); - sb.append(".`_bucketname` AND "); - sb.append(lhs.getAlias()); - sb.append(".`_offset` = "); - sb.append(rhs.getAlias()); - sb.append(".`_offset`) "); - sb.append(this.alias); - queryStr = sb.toString(); - } - -} diff --git ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapQuery.java ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapQuery.java deleted file mode 100644 index 4b1ff46eb7..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapQuery.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.index.bitmap; -import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; - -/** - * Generic interface to representations of queries on bitmap indexes - */ -public interface BitmapQuery { - public String getAlias(); - - public String toString(); -} diff --git ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java deleted file mode 100644 index c4d02eef72..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java +++ /dev/null @@ -1,408 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
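
BitmapInnerQuery and BitmapOuterQuery above are essentially string builders: each inner query scans one bitmap index table for the pushed predicate, and the outer query joins two of them on (_bucketname, _offset) while AND-ing their bitmaps. A compact sketch of that composition; the EWAH_BITMAP_AND usage and column names follow the deleted code, while the table names and predicates are hypothetical, and it is not part of this patch.

// Composes the nested sub-select strings that the deleted BitmapInnerQuery /
// BitmapOuterQuery classes produced for the re-entrant bitmap index query.
class BitmapQuerySketch {
    // (SELECT * FROM <indexTable> WHERE <predicate>) <alias>
    static String inner(String indexTable, String predicate, String alias) {
        return "(SELECT * FROM " + indexTable + " WHERE " + predicate + ") " + alias;
    }

    // Joins two sub-queries on (_bucketname, _offset) and ANDs their bitmaps.
    static String outer(String alias, String lhs, String lhsAlias, String rhs, String rhsAlias) {
        return "(SELECT " + lhsAlias + ".`_bucketname`, " + rhsAlias + ".`_offset`, "
            + "EWAH_BITMAP_AND(" + lhsAlias + ".`_bitmaps`, " + rhsAlias + ".`_bitmaps`) AS `_bitmaps` "
            + "FROM " + lhs + " JOIN " + rhs + " ON "
            + lhsAlias + ".`_bucketname` = " + rhsAlias + ".`_bucketname` AND "
            + lhsAlias + ".`_offset` = " + rhsAlias + ".`_offset`) " + alias;
    }

    public static void main(String[] args) {
        String ind0 = inner("db.t_key_idx", "`key` = 123", "ind0");
        String ind1 = inner("db.t_value_idx", "`value` = 'x'", "ind1");
        // Rows whose combined bitmap is non-empty identify the blocks the outer
        // INSERT OVERWRITE DIRECTORY query keeps (see generateIndexQuery above).
        System.out.println(outer("oind1", ind0, "ind0", ind1, "ind1"));
    }
}
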
- */ - -package org.apache.hadoop.hive.ql.index.compact; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Set; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hive.common.JavaUtils; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.ql.Driver; -import org.apache.hadoop.hive.ql.exec.FilterOperator; -import org.apache.hadoop.hive.ql.exec.Operator; -import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.hooks.ReadEntity; -import org.apache.hadoop.hive.ql.hooks.WriteEntity; -import org.apache.hadoop.hive.ql.index.HiveIndexQueryContext; -import org.apache.hadoop.hive.ql.index.IndexPredicateAnalyzer; -import org.apache.hadoop.hive.ql.index.IndexSearchCondition; -import org.apache.hadoop.hive.ql.index.TableBasedIndexHandler; -import org.apache.hadoop.hive.ql.io.HiveInputFormat; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.HiveStoragePredicateHandler.DecomposedPredicate; -import org.apache.hadoop.hive.ql.metadata.HiveUtils; -import org.apache.hadoop.hive.ql.metadata.Partition; -import org.apache.hadoop.hive.ql.metadata.VirtualColumn; -import org.apache.hadoop.hive.ql.optimizer.IndexUtils; -import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; -import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; -import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; -import org.apache.hadoop.hive.ql.plan.MapWork; -import org.apache.hadoop.hive.ql.plan.MapredWork; -import org.apache.hadoop.hive.ql.plan.OperatorDesc; -import org.apache.hadoop.hive.ql.plan.PartitionDesc; -import org.apache.hadoop.hive.ql.session.LineageState; -import org.apache.hadoop.hive.ql.stats.StatsUtils; -import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual; -import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan; -import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan; -import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan; -import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan; - -public class CompactIndexHandler extends TableBasedIndexHandler { - - // The names of the partition columns - private Set partitionCols; - // Whether or not the conditions have been met to use the fact the index is sorted - private boolean useSorted; - private static final Logger LOG = LoggerFactory.getLogger(CompactIndexHandler.class.getName()); - - - @Override - public void analyzeIndexDefinition(Table baseTable, Index index, - Table indexTable) throws HiveException { - StorageDescriptor storageDesc = index.getSd(); - if (this.usesIndexTable() && indexTable != null) { - StorageDescriptor indexTableSd = storageDesc.deepCopy(); - List indexTblCols = indexTableSd.getCols(); - FieldSchema bucketFileName = new FieldSchema("_bucketname", "string", ""); - indexTblCols.add(bucketFileName); - FieldSchema offSets = new FieldSchema("_offsets", "array", ""); - indexTblCols.add(offSets); - indexTable.setSd(indexTableSd); - } - } - - @Override - protected Task getIndexBuilderMapRedTask(Set inputs, Set outputs, 
- List indexField, boolean partitioned, - PartitionDesc indexTblPartDesc, String indexTableName, - PartitionDesc baseTablePartDesc, String baseTableName, String dbName, - LineageState lineageState) throws HiveException { - - String indexCols = HiveUtils.getUnparsedColumnNamesFromFieldSchema(indexField); - - //form a new insert overwrite query. - StringBuilder command= new StringBuilder(); - LinkedHashMap partSpec = indexTblPartDesc.getPartSpec(); - - String fullIndexTableName = StatsUtils.getFullyQualifiedTableName(HiveUtils.unparseIdentifier(dbName), - HiveUtils.unparseIdentifier(indexTableName)); - command.append("INSERT OVERWRITE TABLE " + fullIndexTableName); - if (partitioned && indexTblPartDesc != null) { - command.append(" PARTITION ( "); - List ret = getPartKVPairStringArray(partSpec); - for (int i = 0; i < ret.size(); i++) { - String partKV = ret.get(i); - command.append(partKV); - if (i < ret.size() - 1) { - command.append(","); - } - } - command.append(" ) "); - } - - String fullBaseTableName = StatsUtils.getFullyQualifiedTableName(HiveUtils.unparseIdentifier(dbName), - HiveUtils.unparseIdentifier(baseTableName)); - command.append(" SELECT "); - command.append(indexCols); - command.append(","); - - command.append(VirtualColumn.FILENAME.getName()); - command.append(","); - command.append(" collect_set ("); - command.append(VirtualColumn.BLOCKOFFSET.getName()); - command.append(") "); - command.append(" FROM " + fullBaseTableName); - LinkedHashMap basePartSpec = baseTablePartDesc.getPartSpec(); - if(basePartSpec != null) { - command.append(" WHERE "); - List pkv = getPartKVPairStringArray(basePartSpec); - for (int i = 0; i < pkv.size(); i++) { - String partKV = pkv.get(i); - command.append(partKV); - if (i < pkv.size() - 1) { - command.append(" AND "); - } - } - } - command.append(" GROUP BY "); - command.append(indexCols + ", " + VirtualColumn.FILENAME.getName()); - - HiveConf builderConf = new HiveConf(getConf(), CompactIndexHandler.class); - builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGEMAPFILES, false); - builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGEMAPREDFILES, false); - builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGETEZFILES, false); - Task rootTask = IndexUtils.createRootTask(builderConf, inputs, outputs, - command, partSpec, indexTableName, dbName, lineageState); - return rootTask; - } - - @Override - public void generateIndexQuery(List indexes, ExprNodeDesc predicate, - ParseContext pctx, HiveIndexQueryContext queryContext) { - - Index index = indexes.get(0); - DecomposedPredicate decomposedPredicate = decomposePredicate(predicate, index, - queryContext.getQueryPartitions()); - - if (decomposedPredicate == null) { - queryContext.setQueryTasks(null); - return; // abort if we couldn't pull out anything from the predicate - } - - // pass residual predicate back out for further processing - queryContext.setResidualPredicate(decomposedPredicate.residualPredicate); - // setup TableScanOperator to change input format for original query - queryContext.setIndexInputFormat(HiveCompactIndexInputFormat.class.getName()); - - // Build reentrant QL for index query - StringBuilder qlCommand = new StringBuilder("INSERT OVERWRITE DIRECTORY "); - - String tmpFile = pctx.getContext().getMRTmpPath().toUri().toString(); - queryContext.setIndexIntermediateFile(tmpFile); - qlCommand.append( "\"" + tmpFile + "\" "); // QL includes " around file name - qlCommand.append("SELECT `_bucketname` , `_offsets` FROM "); - 
qlCommand.append(HiveUtils.unparseIdentifier(index.getIndexTableName())); - qlCommand.append(" WHERE "); - - String predicateString = decomposedPredicate.pushedPredicate.getExprString(); - qlCommand.append(predicateString); - - // generate tasks from index query string - LOG.info("Generating tasks for re-entrant QL query: " + qlCommand.toString()); - HiveConf queryConf = new HiveConf(pctx.getConf(), CompactIndexHandler.class); - HiveConf.setBoolVar(queryConf, HiveConf.ConfVars.COMPRESSRESULT, false); - Driver driver = new Driver(queryConf, pctx.getQueryState().getLineageState()); - driver.compile(qlCommand.toString(), false); - - if (pctx.getConf().getBoolVar(ConfVars.HIVE_INDEX_COMPACT_BINARY_SEARCH) && useSorted) { - // For now, only works if the predicate is a single condition - MapWork work = null; - String originalInputFormat = null; - for (Task task : driver.getPlan().getRootTasks()) { - // The index query should have one and only one map reduce task in the root tasks - // Otherwise something is wrong, log the problem and continue using the default format - if (task.getWork() instanceof MapredWork) { - if (work != null) { - LOG.error("Tried to use a binary search on a compact index but there were an " + - "unexpected number (>1) of root level map reduce tasks in the " + - "reentrant query plan."); - work.setInputformat(null); - work.setInputFormatSorted(false); - break; - } - if (task.getWork() != null) { - work = ((MapredWork)task.getWork()).getMapWork(); - } - String inputFormat = work.getInputformat(); - originalInputFormat = inputFormat; - if (inputFormat == null) { - inputFormat = HiveConf.getVar(pctx.getConf(), HiveConf.ConfVars.HIVEINPUTFORMAT); - } - - // We can only perform a binary search with HiveInputFormat and CombineHiveInputFormat - // and BucketizedHiveInputFormat - try { - if (!HiveInputFormat.class.isAssignableFrom(JavaUtils.loadClass(inputFormat))) { - work = null; - break; - } - } catch (ClassNotFoundException e) { - LOG.error("Map reduce work's input format class: " + inputFormat + " was not found. " + - "Cannot use the fact the compact index is sorted."); - work = null; - break; - } - - work.setInputFormatSorted(true); - } - } - - if (work != null) { - // Find the filter operator and expr node which act on the index column and mark them - if (!findIndexColumnFilter(work.getAliasToWork().values())) { - LOG.error("Could not locate the index column's filter operator and expr node. 
Cannot " + - "use the fact the compact index is sorted."); - work.setInputformat(originalInputFormat); - work.setInputFormatSorted(false); - } - } - } - - - queryContext.addAdditionalSemanticInputs(driver.getPlan().getInputs()); - queryContext.setQueryTasks(driver.getPlan().getRootTasks()); - return; - } - - /** - * Does a depth first search on the operator tree looking for a filter operator whose predicate - * has one child which is a column which is not in the partition - * @param operators - * @return whether or not it has found its target - */ - private boolean findIndexColumnFilter( - Collection> operators) { - for (Operator op : operators) { - if (op instanceof FilterOperator && - ((FilterOperator)op).getConf().getPredicate().getChildren() != null) { - // Is this the target - if (findIndexColumnExprNodeDesc(((FilterOperator)op).getConf().getPredicate())) { - ((FilterOperator)op).getConf().setSortedFilter(true); - return true; - } - } - - // If the target has been found, no need to continue - if (findIndexColumnFilter(op.getChildOperators())) { - return true; - } - } - return false; - } - - private boolean findIndexColumnExprNodeDesc(ExprNodeDesc expression) { - if (expression.getChildren() == null) { - return false; - } - - if (expression.getChildren().size() == 2) { - ExprNodeColumnDesc columnDesc = null; - if (expression.getChildren().get(0) instanceof ExprNodeColumnDesc) { - columnDesc = (ExprNodeColumnDesc)expression.getChildren().get(0); - } else if (expression.getChildren().get(1) instanceof ExprNodeColumnDesc) { - columnDesc = (ExprNodeColumnDesc)expression.getChildren().get(1); - } - - // Is this the target - if (columnDesc != null && !partitionCols.contains(columnDesc.getColumn())) { - assert expression instanceof ExprNodeGenericFuncDesc : - "Expression containing index column is does not support sorting, should not try" + - "and sort"; - ((ExprNodeGenericFuncDesc)expression).setSortedExpr(true); - return true; - } - } - - for (ExprNodeDesc child : expression.getChildren()) { - // If the target has been found, no need to continue - if (findIndexColumnExprNodeDesc(child)) { - return true; - } - } - return false; - } - - /** - * Split the predicate into the piece we can deal with (pushed), and the one we can't (residual) - * @param predicate - * @param index - * @return - */ - private DecomposedPredicate decomposePredicate(ExprNodeDesc predicate, Index index, - Set queryPartitions) { - IndexPredicateAnalyzer analyzer = getIndexPredicateAnalyzer(index, queryPartitions); - List searchConditions = new ArrayList(); - // split predicate into pushed (what we can handle), and residual (what we can't handle) - ExprNodeGenericFuncDesc residualPredicate = (ExprNodeGenericFuncDesc)analyzer. 
- analyzePredicate(predicate, searchConditions); - - if (searchConditions.size() == 0) { - return null; - } - - int numIndexCols = 0; - for (IndexSearchCondition searchCondition : searchConditions) { - if (!partitionCols.contains(searchCondition.getColumnDesc().getColumn())) { - numIndexCols++; - } - } - - // For now, only works if the predicate has a single condition on an index column - if (numIndexCols == 1) { - useSorted = true; - } else { - useSorted = false; - } - - DecomposedPredicate decomposedPredicate = new DecomposedPredicate(); - decomposedPredicate.pushedPredicate = analyzer.translateSearchConditions(searchConditions); - decomposedPredicate.residualPredicate = residualPredicate; - - return decomposedPredicate; - } - - /** - * Instantiate a new predicate analyzer suitable for determining - * whether we can use an index, based on rules for indexes in - * WHERE clauses that we support - * - * @return preconfigured predicate analyzer for WHERE queries - */ - private IndexPredicateAnalyzer getIndexPredicateAnalyzer(Index index, Set queryPartitions) { - IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer(); - - analyzer.addComparisonOp(GenericUDFOPEqual.class.getName()); - analyzer.addComparisonOp(GenericUDFOPLessThan.class.getName()); - analyzer.addComparisonOp(GenericUDFOPEqualOrLessThan.class.getName()); - analyzer.addComparisonOp(GenericUDFOPGreaterThan.class.getName()); - analyzer.addComparisonOp(GenericUDFOPEqualOrGreaterThan.class.getName()); - - // only return results for columns in this index - List columnSchemas = index.getSd().getCols(); - for (FieldSchema column : columnSchemas) { - analyzer.allowColumnName(column.getName()); - } - - // partitioned columns are treated as if they have indexes so that the partitions - // are used during the index query generation - partitionCols = new HashSet(); - for (Partition part : queryPartitions) { - if (part.getSpec().isEmpty()) { - continue; // empty partitions are from whole tables, so we don't want to add them in - } - for (String column : part.getSpec().keySet()) { - analyzer.allowColumnName(column); - partitionCols.add(column); - } - } - - return analyzer; - } - - - @Override - public boolean checkQuerySize(long querySize, HiveConf hiveConf) { - long minSize = hiveConf.getLongVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER_COMPACT_MINSIZE); - long maxSize = hiveConf.getLongVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER_COMPACT_MAXSIZE); - if (maxSize < 0) { - maxSize = Long.MAX_VALUE; - } - return (querySize > minSize & querySize < maxSize); - } - - @Override - public boolean usesIndexTable() { - return true; - } - -} diff --git ql/src/java/org/apache/hadoop/hive/ql/index/compact/HiveCompactIndexInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/index/compact/HiveCompactIndexInputFormat.java deleted file mode 100644 index 6d9c968faa..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/index/compact/HiveCompactIndexInputFormat.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.index.compact; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat; - -public class HiveCompactIndexInputFormat extends HiveIndexedInputFormat { - - public static final Logger l4j = - LoggerFactory.getLogger(HiveCompactIndexInputFormat.class.getSimpleName()); - - public HiveCompactIndexInputFormat() { - super("hive.index.compact.file"); - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java index 856b026c91..316f9b5c31 100755 --- ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; +import java.util.Comparator; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; @@ -41,6 +42,7 @@ import org.apache.hive.common.util.Ref; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -105,6 +107,22 @@ protected Map pathToPartitionInfo; protected MapWork mrwork; + public static final class HiveInputSplitComparator implements Comparator { + @Override + public int compare(HiveInputSplit o1, HiveInputSplit o2) { + int pathCompare = comparePath(o1.getPath(), o2.getPath()); + if (pathCompare != 0) { + return pathCompare; + } + return Long.compare(o1.getStart(), o2.getStart()); + } + + private int comparePath(Path p1, Path p2) { + return p1.compareTo(p2); + } + } + + /** * HiveInputSplit encapsulates an InputSplit with its corresponding * inputFormatClass. 
The reason that it derives from FileSplit is to make sure @@ -113,6 +131,7 @@ public static class HiveInputSplit extends FileSplit implements InputSplit, Configurable { + InputSplit inputSplit; String inputFormatClassName; diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java index 2930a46085..4d46f59839 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java @@ -97,8 +97,7 @@ public boolean hasPpd() { public void configure(HiveConf queryConfig) { this.conf = queryConfig; this.sarg = ConvertAstToSearchArg.createFromConf(conf); - this.isPpdEnabled = HiveConf.getBoolVar(conf, ConfVars.HIVEOPTINDEXFILTER) - && HiveConf.getBoolVar(conf, ConfVars.HIVE_ORC_MS_FOOTER_CACHE_PPD); + this.isPpdEnabled = HiveConf.getBoolVar(conf, ConfVars.HIVE_ORC_MS_FOOTER_CACHE_PPD); this.isInTest = HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST); this.sargIsOriginal = this.sargNotIsOriginal = null; } @@ -161,7 +160,9 @@ private int getAndVerifyIndex(HashMap posMap, private boolean processBbResult( ByteBuffer bb, int ix, HdfsFileStatusWithId file, OrcTail[] result) throws IOException { - if (bb == null) return true; + if (bb == null) { + return true; + } result[ix] = createOrcTailFromMs(file, bb); if (result[ix] == null) { return false; @@ -173,7 +174,9 @@ private boolean processBbResult( private void processPpdResult(MetadataPpdResult mpr, HdfsFileStatusWithId file, int ix, OrcTail[] result, ByteBuffer[] ppdResult) throws IOException { - if (mpr == null) return; // This file is unknown to metastore. + if (mpr == null) { + return; // This file is unknown to metastore. + } ppdResult[ix] = mpr.isSetIncludeBitset() ? mpr.bufferForIncludeBitset() : NO_SPLIT_AFTER_PPD; if (mpr.isSetMetadata()) { @@ -187,7 +191,9 @@ private void processPpdResult(MetadataPpdResult mpr, HdfsFileStatusWithId file, private List determineFileIdsToQuery( List files, OrcTail[] result, HashMap posMap) { for (int i = 0; i < result.length; ++i) { - if (result[i] != null) continue; + if (result[i] != null) { + continue; + } HdfsFileStatusWithId file = files.get(i); final FileStatus fs = file.getFileStatus(); Long fileId = file.getFileId(); @@ -224,9 +230,13 @@ public Long getFileId() { } private ByteBuffer getSerializedSargForMetastore(boolean isOriginal) { - if (sarg == null) return null; + if (sarg == null) { + return null; + } ByteBuffer serializedSarg = isOriginal ?
sargIsOriginal : sargNotIsOriginal; - if (serializedSarg != null) return serializedSarg; + if (serializedSarg != null) { + return serializedSarg; + } SearchArgument sarg2 = sarg; Kryo kryo = SerializationUtilities.borrowKryo(); try { @@ -292,7 +302,9 @@ public static void translateSargToTableColIndexes( private static OrcTail createOrcTailFromMs( HdfsFileStatusWithId file, ByteBuffer bb) throws IOException { - if (bb == null) return null; + if (bb == null) { + return null; + } FileStatus fs = file.getFileStatus(); ByteBuffer copy = bb.duplicate(); try { diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index c8d1589f44..83cfa38014 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -19,11 +19,6 @@ package org.apache.hadoop.hive.ql.metadata; import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE; -import static org.apache.hadoop.hive.serde.serdeConstants.COLLECTION_DELIM; -import static org.apache.hadoop.hive.serde.serdeConstants.ESCAPE_CHAR; -import static org.apache.hadoop.hive.serde.serdeConstants.FIELD_DELIM; -import static org.apache.hadoop.hive.serde.serdeConstants.LINE_DELIM; -import static org.apache.hadoop.hive.serde.serdeConstants.MAPKEY_DELIM; import static org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT; import static org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME; @@ -108,7 +103,6 @@ import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; import org.apache.hadoop.hive.metastore.api.HiveObjectRef; import org.apache.hadoop.hive.metastore.api.HiveObjectType; -import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.InsertEventRequestData; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.Materialization; @@ -116,7 +110,6 @@ import org.apache.hadoop.hive.metastore.api.MetadataPpdResult; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest; -import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest; import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; import org.apache.hadoop.hive.metastore.api.PrincipalType; @@ -127,11 +120,9 @@ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.SkewedInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import org.apache.hadoop.hive.metastore.api.WMMapping; @@ -150,7 +141,6 @@ import org.apache.hadoop.hive.ql.exec.FunctionUtils; import org.apache.hadoop.hive.ql.exec.SerializationUtilities; import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.index.HiveIndexHandler; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; import org.apache.hadoop.hive.ql.log.PerfLogger; @@ 
-668,34 +658,6 @@ public void alterTable(String dbName, String tblName, Table newTbl, boolean casc } } - public void alterIndex(String baseTableName, String indexName, Index newIdx) - throws InvalidOperationException, HiveException { - String[] names = Utilities.getDbTableName(baseTableName); - alterIndex(names[0], names[1], indexName, newIdx); - } - - /** - * Updates the existing index metadata with the new metadata. - * - * @param idxName - * name of the existing index - * @param newIdx - * new name of the index. could be the old name - * @throws InvalidOperationException - * if the changes in metadata is not acceptable - * @throws TException - */ - public void alterIndex(String dbName, String baseTblName, String idxName, Index newIdx) - throws InvalidOperationException, HiveException { - try { - getMSC().alter_index(dbName, baseTblName, idxName, newIdx); - } catch (MetaException e) { - throw new HiveException("Unable to alter index. " + e.getMessage(), e); - } catch (TException e) { - throw new HiveException("Unable to alter index. " + e.getMessage(), e); - } - } - /** * Updates the existing partition metadata with the new metadata. * @@ -934,243 +896,6 @@ public void createTable(Table tbl, boolean ifNotExists) throws HiveException { } /** - * - * @param tableName - * table name - * @param indexName - * index name - * @param indexHandlerClass - * index handler class - * @param indexedCols - * index columns - * @param indexTblName - * index table's name - * @param deferredRebuild - * referred build index table's data - * @param inputFormat - * input format - * @param outputFormat - * output format - * @param serde - * @param storageHandler - * index table's storage handler - * @param location - * location - * @param idxProps - * idx - * @param serdeProps - * serde properties - * @param collItemDelim - * @param fieldDelim - * @param fieldEscape - * @param lineDelim - * @param mapKeyDelim - * @throws HiveException - */ - public void createIndex(String tableName, String indexName, String indexHandlerClass, - List indexedCols, String indexTblName, boolean deferredRebuild, - String inputFormat, String outputFormat, String serde, - String storageHandler, String location, - Map idxProps, Map tblProps, Map serdeProps, - String collItemDelim, String fieldDelim, String fieldEscape, - String lineDelim, String mapKeyDelim, String indexComment) - throws HiveException { - - try { - String tdname = Utilities.getDatabaseName(tableName); - String idname = Utilities.getDatabaseName(indexTblName); - if (!idname.equals(tdname)) { - throw new HiveException("Index on different database (" + idname - + ") from base table (" + tdname + ") is not supported."); - } - - Index old_index = null; - try { - old_index = getIndex(tableName, indexName); - } catch (Exception e) { - } - if (old_index != null) { - throw new HiveException("Index " + indexName + " already exists on table " + tableName); - } - - org.apache.hadoop.hive.metastore.api.Table baseTbl = getTable(tableName).getTTable(); - if (TableType.VIRTUAL_VIEW.toString().equals(baseTbl.getTableType())) { - throw new HiveException("tableName="+ tableName +" is a VIRTUAL VIEW. Index on VIRTUAL VIEW is not supported."); - } - if (baseTbl.isTemporary()) { - throw new HiveException("tableName=" + tableName - + " is a TEMPORARY TABLE. 
Index on TEMPORARY TABLE is not supported."); - } - - org.apache.hadoop.hive.metastore.api.Table temp = null; - try { - temp = getTable(indexTblName).getTTable(); - } catch (Exception e) { - } - if (temp != null) { - throw new HiveException("Table name " + indexTblName + " already exists. Choose another name."); - } - - SerDeInfo serdeInfo = new SerDeInfo(); - serdeInfo.setName(indexTblName); - - if(serde != null) { - serdeInfo.setSerializationLib(serde); - } else { - if (storageHandler == null) { - serdeInfo.setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName()); - } else { - HiveStorageHandler sh = HiveUtils.getStorageHandler(getConf(), storageHandler); - String serDeClassName = sh.getSerDeClass().getName(); - serdeInfo.setSerializationLib(serDeClassName); - } - } - - serdeInfo.setParameters(new HashMap()); - if (fieldDelim != null) { - serdeInfo.getParameters().put(FIELD_DELIM, fieldDelim); - serdeInfo.getParameters().put(SERIALIZATION_FORMAT, fieldDelim); - } - if (fieldEscape != null) { - serdeInfo.getParameters().put(ESCAPE_CHAR, fieldEscape); - } - if (collItemDelim != null) { - serdeInfo.getParameters().put(COLLECTION_DELIM, collItemDelim); - } - if (mapKeyDelim != null) { - serdeInfo.getParameters().put(MAPKEY_DELIM, mapKeyDelim); - } - if (lineDelim != null) { - serdeInfo.getParameters().put(LINE_DELIM, lineDelim); - } - - if (serdeProps != null) { - Iterator> iter = serdeProps.entrySet() - .iterator(); - while (iter.hasNext()) { - Entry m = iter.next(); - serdeInfo.getParameters().put(m.getKey(), m.getValue()); - } - } - - List indexTblCols = new ArrayList(); - List sortCols = new ArrayList(); - int k = 0; - Table metaBaseTbl = new Table(baseTbl); - // Even though we are storing these in metastore, get regular columns. Indexes on lengthy - // types from e.g. Avro schema will just fail to create the index table (by design). 
- List cols = metaBaseTbl.getCols(); - for (int i = 0; i < cols.size(); i++) { - FieldSchema col = cols.get(i); - if (indexedCols.contains(col.getName())) { - indexTblCols.add(col); - sortCols.add(new Order(col.getName(), 1)); - k++; - } - } - if (k != indexedCols.size()) { - throw new RuntimeException( - "Check the index columns, they should appear in the table being indexed."); - } - - int time = (int) (System.currentTimeMillis() / 1000); - org.apache.hadoop.hive.metastore.api.Table tt = null; - HiveIndexHandler indexHandler = HiveUtils.getIndexHandler(this.getConf(), indexHandlerClass); - - String itname = Utilities.getTableName(indexTblName); - if (indexHandler.usesIndexTable()) { - tt = new org.apache.hadoop.hive.ql.metadata.Table(idname, itname).getTTable(); - List partKeys = baseTbl.getPartitionKeys(); - tt.setPartitionKeys(partKeys); - tt.setTableType(TableType.INDEX_TABLE.toString()); - if (tblProps != null) { - for (Entry prop : tblProps.entrySet()) { - tt.putToParameters(prop.getKey(), prop.getValue()); - } - } - SessionState ss = SessionState.get(); - CreateTableAutomaticGrant grants; - if (ss != null && ((grants = ss.getCreateTableGrants()) != null)) { - PrincipalPrivilegeSet principalPrivs = new PrincipalPrivilegeSet(); - principalPrivs.setUserPrivileges(grants.getUserGrants()); - principalPrivs.setGroupPrivileges(grants.getGroupGrants()); - principalPrivs.setRolePrivileges(grants.getRoleGrants()); - tt.setPrivileges(principalPrivs); - } - } - - if(!deferredRebuild) { - throw new RuntimeException("Please specify deferred rebuild using \" WITH DEFERRED REBUILD \"."); - } - - StorageDescriptor indexSd = new StorageDescriptor( - indexTblCols, - location, - inputFormat, - outputFormat, - false/*compressed - not used*/, - -1/*numBuckets - default is -1 when the table has no buckets*/, - serdeInfo, - null/*bucketCols*/, - sortCols, - null/*parameters*/); - - String ttname = Utilities.getTableName(tableName); - Index indexDesc = new Index(indexName, indexHandlerClass, tdname, ttname, time, time, itname, - indexSd, new HashMap(), deferredRebuild); - if (indexComment != null) { - indexDesc.getParameters().put("comment", indexComment); - } - - if (idxProps != null) - { - indexDesc.getParameters().putAll(idxProps); - } - - indexHandler.analyzeIndexDefinition(baseTbl, indexDesc, tt); - - this.getMSC().createIndex(indexDesc, tt); - - } catch (Exception e) { - throw new HiveException(e); - } - } - - public Index getIndex(String baseTableName, String indexName) throws HiveException { - String[] names = Utilities.getDbTableName(baseTableName); - return this.getIndex(names[0], names[1], indexName); - } - - public Index getIndex(String dbName, String baseTableName, - String indexName) throws HiveException { - try { - return this.getMSC().getIndex(dbName, baseTableName, indexName); - } catch (Exception e) { - throw new HiveException(e); - } - } - - public boolean dropIndex(String baseTableName, String index_name, - boolean throwException, boolean deleteData) throws HiveException { - String[] names = Utilities.getDbTableName(baseTableName); - return dropIndex(names[0], names[1], index_name, throwException, deleteData); - } - - public boolean dropIndex(String db_name, String tbl_name, String index_name, - boolean throwException, boolean deleteData) throws HiveException { - try { - return getMSC().dropIndex(db_name, tbl_name, index_name, deleteData); - } catch (NoSuchObjectException e) { - if (throwException) { - throw new HiveException("Index " + index_name + " doesn't exist. 
", e); - } - return false; - } catch (Exception e) { - throw new HiveException(e.getMessage(), e); - } - } - - /** * Drops table along with the data in it. If the table doesn't exist then it * is a no-op. If ifPurge option is specified it is passed to the * hdfs command that removes table data from warehouse to make it skip trash. @@ -1522,8 +1247,9 @@ public Table apply(org.apache.hadoop.hive.metastore.api.Table table) { */ public List getTablesByType(String dbName, String pattern, TableType type) throws HiveException { - if (dbName == null) + if (dbName == null) { dbName = SessionState.get().getCurrentDatabase(); + } try { if (type != null) { @@ -2779,13 +2505,17 @@ private static void addInsertFileInformation(List newFiles, FileSystem fil } addInsertNonDirectoryInformation(p, fileSystem, insertData); } - if (directories == null) return; + if (directories == null) { + return; + } // We don't expect any nesting in most cases, or a lot of it if it is present; union and LB // are some examples where we would have 1, or few, levels respectively. while (!directories.isEmpty()) { Path dir = directories.poll(); FileStatus[] contents = fileSystem.listStatus(dir); - if (contents == null) continue; + if (contents == null) { + continue; + } for (FileStatus status : contents) { if (status.isDirectory()) { directories.add(status.getPath()); @@ -3775,13 +3505,15 @@ static private HiveException getHiveException(Exception e, String msg, String lo ErrorMsg errorMsg = ErrorMsg.getErrorMsg(e); - if (logMsg != null) + if (logMsg != null) { LOG.info(String.format(logMsg, e.getMessage())); + } - if (errorMsg != ErrorMsg.UNRESOLVED_RT_EXCEPTION) + if (errorMsg != ErrorMsg.UNRESOLVED_RT_EXCEPTION) { return new HiveException(e, e.getMessage(), errorMsg, hiveErrMsg); - else + } else { return new HiveException(msg, e); + } } /** @@ -3979,7 +3711,9 @@ private static void moveAcidFiles(String deltaFileType, PathFilter pathFilter, F bucketDest.toUri().toString()); try { fs.rename(bucketSrc, bucketDest); - if (newFiles != null) newFiles.add(bucketDest); + if (newFiles != null) { + newFiles.add(bucketDest); + } } catch (Exception e) { throw getHiveException(e, msg); } @@ -4118,7 +3852,9 @@ private void cleanUpOneDirectoryForReplace(Path path, FileSystem fs, recycleDirToCmPath(path, purge); } FileStatus[] statuses = fs.listStatus(path, pathFilter); - if (statuses == null || statuses.length == 0) return; + if (statuses == null || statuses.length == 0) { + return; + } if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { String s = "Deleting files under " + path + " for replace: "; for (FileStatus file : statuses) { @@ -4342,17 +4078,6 @@ private String getUserName() { } } - public List getIndexes(String dbName, String tblName, short max) throws HiveException { - List indexes = null; - try { - indexes = getMSC().listIndexes(dbName, tblName, max); - } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); - } - return indexes; - } - public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws HiveException { try { return getMSC().setPartitionColumnStatistics(request); @@ -4440,6 +4165,7 @@ public void cancelDelegationToken(String tokenStrForm) /** * @deprecated use {@link #compact2(String, String, String, String, Map)} */ + @Deprecated public void compact(String dbname, String tableName, String partName, String compactType, Map tblproperties) throws HiveException { compact2(dbname, tableName, partName, compactType, tblproperties); @@ -4461,9 +4187,13 @@ 
public CompactionResponse compact2(String dbname, String tableName, String partN throws HiveException { try { CompactionType cr = null; - if ("major".equals(compactType)) cr = CompactionType.MAJOR; - else if ("minor".equals(compactType)) cr = CompactionType.MINOR; - else throw new RuntimeException("Unknown compaction type " + compactType); + if ("major".equals(compactType)) { + cr = CompactionType.MAJOR; + } else if ("minor".equals(compactType)) { + cr = CompactionType.MINOR; + } else { + throw new RuntimeException("Unknown compaction type " + compactType); + } return getMSC().compact2(dbname, tableName, partName, cr, tblproperties); } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java index 16c9834a45..dae18fb9b1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java @@ -26,10 +26,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.exec.tez.TezContext; -import org.apache.hadoop.hive.ql.index.HiveIndexHandler; import org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator; import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider; import org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider; @@ -315,24 +313,6 @@ private HiveUtils() { // prevent instantiation } - public static HiveIndexHandler getIndexHandler(HiveConf conf, - String indexHandlerClass) throws HiveException { - - if (indexHandlerClass == null) { - return null; - } - try { - Class handlerClass = - (Class) - Class.forName(indexHandlerClass, true, Utilities.getSessionSpecifiedClassLoader()); - HiveIndexHandler indexHandler = ReflectionUtils.newInstance(handlerClass, conf); - return indexHandler; - } catch (ClassNotFoundException e) { - throw new HiveException("Error in loading index handler." - + e.getMessage(), e); - } - } - @SuppressWarnings("unchecked") public static List getMetaStoreAuthorizeProviderManagers( Configuration conf, HiveConf.ConfVars authorizationProviderConfKey, @@ -438,22 +418,6 @@ public static HiveAuthenticationProvider getAuthenticator( return ret; } - - /** - * Convert FieldSchemas to columnNames with backticks around them. 
- */ - public static String getUnparsedColumnNamesFromFieldSchema( - List fieldSchemas) { - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < fieldSchemas.size(); i++) { - if (i > 0) { - sb.append(","); - } - sb.append(HiveUtils.unparseIdentifier(fieldSchemas.get(i).getName())); - } - return sb.toString(); - } - public static String getLocalDirList(Configuration conf) { if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) { TezContext tezContext = (TezContext) TezContext.get(); diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java index 3b87824257..a5b6a4b0c3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; @@ -42,8 +41,6 @@ import org.apache.hadoop.hive.metastore.api.WMPoolTrigger; import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.WMTrigger; -import org.apache.hadoop.hive.ql.index.HiveIndex; -import org.apache.hadoop.hive.ql.index.HiveIndex.IndexType; import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; @@ -55,7 +52,6 @@ import org.apache.hadoop.hive.ql.metadata.NotNullConstraint; import org.apache.hadoop.hive.ql.plan.DescTableDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; -import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc; import org.apache.hadoop.hive.serde2.io.DateWritable; import org.apache.hive.common.util.HiveStringUtils; @@ -136,45 +132,6 @@ static ColumnStatisticsObj getColumnStatisticsObject(String colName, return null; } - public static String getIndexInformation(Index index, boolean isOutputPadded) { - StringBuilder indexInfo = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE); - - List indexColumns = new ArrayList(); - - indexColumns.add(index.getIndexName()); - indexColumns.add(index.getOrigTableName()); - - // index key names - List indexKeys = index.getSd().getCols(); - StringBuilder keyString = new StringBuilder(); - boolean first = true; - for (FieldSchema key : indexKeys) - { - if (!first) - { - keyString.append(", "); - } - keyString.append(key.getName()); - first = false; - } - - indexColumns.add(keyString.toString()); - - indexColumns.add(index.getIndexTableName()); - - // index type - String indexHandlerClass = index.getIndexHandlerClass(); - IndexType indexType = HiveIndex.getIndexTypeByClassName(indexHandlerClass); - indexColumns.add(indexType.getName()); - - String comment = HiveStringUtils.escapeJava(index.getParameters().get("comment")); - indexColumns.add(comment); - - formatOutput(indexColumns.toArray(new String[0]), indexInfo, isOutputPadded, true); - - return indexInfo.toString(); - } - public static String getConstraintsInformation(PrimaryKeyInfo pkInfo, ForeignKeyInfo fkInfo, UniqueConstraint ukInfo, NotNullConstraint nnInfo) { StringBuilder 
constraintsInfo = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE); @@ -713,12 +670,6 @@ private static void printPadding(StringBuilder tableInfo, int[] columnWidths) { return DescTableDesc.getSchema(showColStats).split("#")[0].split(","); } - public static String getIndexColumnsHeader() { - StringBuilder indexCols = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE); - formatOutput(ShowIndexesDesc.getSchema().split("#")[0].split(","), indexCols); - return indexCols.toString(); - } - public static MetaDataFormatter getFormatter(HiveConf conf) { if ("json".equals(conf.get(HiveConf.ConfVars.HIVE_DDL_OUTPUT_FORMAT.varname, "text"))) { return new JsonMetaDataFormatter(); @@ -802,7 +753,9 @@ private void sortChildren() { if (p2.pool == null) { return (p1.pool == null) ? 0 : -1; } - if (p1.pool == null) return 1; + if (p1.pool == null) { + return 1; + } return Double.compare(p2.pool.getAllocFraction(), p1.pool.getAllocFraction()); }); for (PoolTreeNode child : children) { diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java deleted file mode 100644 index 81952bf1bb..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.optimizer; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Set; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.ql.Driver; -import org.apache.hadoop.hive.ql.exec.TableScanOperator; -import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.exec.TaskFactory; -import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.hooks.ReadEntity; -import org.apache.hadoop.hive.ql.hooks.WriteEntity; -import org.apache.hadoop.hive.ql.index.IndexMetadataChangeTask; -import org.apache.hadoop.hive.ql.index.IndexMetadataChangeWork; -import org.apache.hadoop.hive.ql.metadata.Hive; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.Partition; -import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.optimizer.physical.index.IndexWhereProcessor; -import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; -import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.ql.session.LineageState; - -/** - * Utility class for index support. - * Currently used for BITMAP and AGGREGATE index - * - */ -public final class IndexUtils { - - private static final Logger LOG = LoggerFactory.getLogger(IndexWhereProcessor.class.getName()); - - private IndexUtils(){ - } - - /** - * Check the partitions used by the table scan to make sure they also exist in the - * index table. - * @param pctx - * @param indexes - * @return partitions used by query. 
null if they do not exist in index table - * @throws HiveException - */ - public static Set checkPartitionsCoveredByIndex(TableScanOperator tableScan, - ParseContext pctx, List indexes) throws HiveException { - Hive hive = Hive.get(pctx.getConf()); - // make sure each partition exists on the index table - PrunedPartitionList queryPartitionList = pctx.getOpToPartList().get(tableScan); - Set queryPartitions = queryPartitionList.getPartitions(); - if (queryPartitions == null || queryPartitions.isEmpty()) { - return null; - } - - for (Partition part : queryPartitions) { - if (!containsPartition(hive, part, indexes)) { - return null; // problem if it doesn't contain the partition - } - } - - return queryPartitions; - } - - /** - * check that every index table contains the given partition and is fresh - */ - private static boolean containsPartition(Hive hive, Partition part, List indexes) - throws HiveException { - HashMap partSpec = part.getSpec(); - if (partSpec.isEmpty()) { - // empty specs come from non-partitioned tables - return isIndexTableFresh(hive, indexes, part.getTable()); - } - - for (Index index : indexes) { - // index.getDbName() is used as a default database, which is database of target table, - // if index.getIndexTableName() does not contain database name - String[] qualified = Utilities.getDbTableName(index.getDbName(), index.getIndexTableName()); - Table indexTable = hive.getTable(qualified[0], qualified[1]); - // get partitions that match the spec - Partition matchingPartition = hive.getPartition(indexTable, partSpec, false); - if (matchingPartition == null) { - LOG.info("Index table " + indexTable + "did not contain built partition that matched " + partSpec); - return false; - } else if (!isIndexPartitionFresh(hive, index, part)) { - return false; - } - } - return true; - } - - /** - * Check the index partitions on a partitioned table exist and are fresh - */ - private static boolean isIndexPartitionFresh(Hive hive, Index index, - Partition part) throws HiveException { - LOG.info("checking index staleness..."); - try { - String indexTs = index.getParameters().get(part.getSpec().toString()); - if (indexTs == null) { - return false; - } - - FileSystem partFs = part.getDataLocation().getFileSystem(hive.getConf()); - FileStatus[] parts = partFs.listStatus(part.getDataLocation(), FileUtils.HIDDEN_FILES_PATH_FILTER); - for (FileStatus status : parts) { - if (status.getModificationTime() > Long.parseLong(indexTs)) { - LOG.info("Index is stale on partition '" + part.getName() - + "'. 
Modified time (" + status.getModificationTime() + ") for '" + status.getPath() - + "' is higher than index creation time (" + indexTs + ")."); - return false; - } - } - } catch (IOException e) { - throw new HiveException("Failed to grab timestamp information from partition '" + part.getName() + "': " + e.getMessage(), e); - } - return true; - } - - /** - * Check that the indexes on the un-partitioned table exist and are fresh - */ - private static boolean isIndexTableFresh(Hive hive, List indexes, Table src) - throws HiveException { - //check that they exist - if (indexes == null || indexes.size() == 0) { - return false; - } - //check that they are not stale - for (Index index : indexes) { - LOG.info("checking index staleness..."); - try { - String indexTs = index.getParameters().get("base_timestamp"); - if (indexTs == null) { - return false; - } - - FileSystem srcFs = src.getPath().getFileSystem(hive.getConf()); - FileStatus[] srcs = srcFs.listStatus(src.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER); - for (FileStatus status : srcs) { - if (status.getModificationTime() > Long.parseLong(indexTs)) { - LOG.info("Index is stale on table '" + src.getTableName() - + "'. Modified time (" + status.getModificationTime() + ") for '" + status.getPath() - + "' is higher than index creation time (" + indexTs + ")."); - return false; - } - } - } catch (IOException e) { - throw new HiveException("Failed to grab timestamp information from table '" + src.getTableName() + "': " + e.getMessage(), e); - } - } - return true; - } - - - /** - * Get a list of indexes on a table that match given types. - */ - public static List getIndexes(Table baseTableMetaData, List matchIndexTypes) - throws SemanticException { - List matchingIndexes = new ArrayList(); - - List indexesOnTable; - try { - indexesOnTable = getAllIndexes(baseTableMetaData, (short) -1); // get all indexes - } catch (HiveException e) { - throw new SemanticException("Error accessing metastore", e); - } - - for (Index index : indexesOnTable) { - String indexType = index.getIndexHandlerClass(); - if (matchIndexTypes.contains(indexType)) { - matchingIndexes.add(index); - } - } - return matchingIndexes; - } - - /** - * @return List containing Indexes names if there are indexes on this table - * @throws HiveException - **/ - public static List getAllIndexes(Table table, short max) throws HiveException { - Hive hive = Hive.get(); - return hive.getIndexes(table.getTTable().getDbName(), table.getTTable().getTableName(), max); - } - - public static Task createRootTask( - HiveConf builderConf, - Set inputs, - Set outputs, - StringBuilder command, - LinkedHashMap partSpec, - String indexTableName, - String dbName, - LineageState lineageState){ - // Don't try to index optimize the query to build the index - HiveConf.setBoolVar(builderConf, HiveConf.ConfVars.HIVEOPTINDEXFILTER, false); - Driver driver = new Driver(builderConf, SessionState.get().getUserName(), lineageState); - driver.compile(command.toString(), false); - - Task rootTask = driver.getPlan().getRootTasks().get(0); - inputs.addAll(driver.getPlan().getInputs()); - outputs.addAll(driver.getPlan().getOutputs()); - - IndexMetadataChangeWork indexMetaChange = new IndexMetadataChangeWork(partSpec, - indexTableName, dbName); - IndexMetadataChangeTask indexMetaChangeTsk = - (IndexMetadataChangeTask) TaskFactory.get(indexMetaChange, builderConf); - indexMetaChangeTsk.setWork(indexMetaChange); - rootTask.addDependentTask(indexMetaChangeTsk); - - driver.destroy(); - - return rootTask; - } - - -} diff --git 
ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java index 4f6be6d6b4..4151b8c7e1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.translator.HiveOpConverterPostProc; import org.apache.hadoop.hive.ql.optimizer.correlation.CorrelationOptimizer; import org.apache.hadoop.hive.ql.optimizer.correlation.ReduceSinkDeDuplication; -import org.apache.hadoop.hive.ql.optimizer.index.RewriteGBUsingIndex; import org.apache.hadoop.hive.ql.optimizer.lineage.Generator; import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPruner; import org.apache.hadoop.hive.ql.optimizer.metainfo.annotation.AnnotateWithOpTraits; @@ -110,9 +109,9 @@ public void initialize(HiveConf hiveConf) { } if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTCONSTANTPROPAGATION) && - !pctx.getContext().isCboSucceeded()) { - // We run constant propagation twice because after predicate pushdown, filter expressions - // are combined and may become eligible for reduction (like is not null filter). + !pctx.getContext().isCboSucceeded()) { + // We run constant propagation twice because after predicate pushdown, filter expressions + // are combined and may become eligible for reduction (like is not null filter). transformations.add(new ConstantPropagate()); } @@ -155,9 +154,6 @@ public void initialize(HiveConf hiveConf) { LOG.warn("Skew join is currently not supported in tez! Disabling the skew join optimization."); } } - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTGBYUSINGINDEX)) { - transformations.add(new RewriteGBUsingIndex()); - } transformations.add(new SamplePruner()); MapJoinProcessor mapJoinProcessor = isSparkExecEngine ? 
new SparkMapJoinProcessor() @@ -194,7 +190,7 @@ public void initialize(HiveConf hiveConf) { if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.TEZ_OPTIMIZE_BUCKET_PRUNING) && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTPPD) - && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTINDEXFILTER)) { + ) { final boolean compatMode = HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.TEZ_OPTIMIZE_BUCKET_PRUNING_COMPAT); transformations.add(new FixedBucketPruningOptimizer(compatMode)); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java index 5f0e842436..c0ce684eb9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.repl.ReplStateLogWork; import org.apache.hadoop.hive.ql.exec.repl.bootstrap.ReplLoadWork; -import org.apache.hadoop.hive.ql.index.IndexMetadataChangeWork; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.parse.GenTezProcContext; import org.apache.hadoop.hive.ql.parse.GenTezWork; @@ -102,7 +101,6 @@ else if(work instanceof ExplainWork) { } else if(work instanceof ReplLoadWork || work instanceof ReplStateLogWork || - work instanceof IndexMetadataChangeWork || work instanceof GenTezWork || work instanceof GenSparkWork || work instanceof ArchiveWork || diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyCtx.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyCtx.java deleted file mode 100644 index 641d877e88..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyCtx.java +++ /dev/null @@ -1,265 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.optimizer.index; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Stack; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hive.ql.exec.FilterOperator; -import org.apache.hadoop.hive.ql.exec.GroupByOperator; -import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; -import org.apache.hadoop.hive.ql.exec.SelectOperator; -import org.apache.hadoop.hive.ql.exec.TableScanOperator; -import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher; -import org.apache.hadoop.hive.ql.lib.Dispatcher; -import org.apache.hadoop.hive.ql.lib.GraphWalker; -import org.apache.hadoop.hive.ql.lib.Node; -import org.apache.hadoop.hive.ql.lib.NodeProcessor; -import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; -import org.apache.hadoop.hive.ql.lib.PreOrderOnceWalker; -import org.apache.hadoop.hive.ql.lib.Rule; -import org.apache.hadoop.hive.ql.lib.RuleRegExp; -import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; - -/** - * RewriteCanApplyCtx class stores the context for the {@link RewriteCanApplyProcFactory} - * to determine if any index can be used and if the input query - * meets all the criteria for rewrite optimization. - */ -public final class RewriteCanApplyCtx implements NodeProcessorCtx { - - private static final Logger LOG = LoggerFactory.getLogger(RewriteCanApplyCtx.class.getName()); - - private RewriteCanApplyCtx(ParseContext parseContext) { - this.parseContext = parseContext; - } - - public static RewriteCanApplyCtx getInstance(ParseContext parseContext){ - return new RewriteCanApplyCtx(parseContext); - } - - // Rewrite Variables - private boolean selClauseColsFetchException = false; - private boolean queryHasGroupBy = false; - private boolean aggFuncIsNotCount = false; - private boolean aggParameterException = false; - - //The most important, indexKey - private String indexKey; - - private final ParseContext parseContext; - private String alias; - private String baseTableName; - private String indexTableName; - private String aggFunction; - - private TableScanOperator tableScanOperator; - private List selectOperators; - private List groupByOperators; - - void resetCanApplyCtx(){ - setQueryHasGroupBy(false); - setAggFuncIsNotCount(false); - setSelClauseColsFetchException(false); - setBaseTableName(""); - setAggFunction(""); - setIndexKey(""); - } - - public boolean isQueryHasGroupBy() { - return queryHasGroupBy; - } - - public void setQueryHasGroupBy(boolean queryHasGroupBy) { - this.queryHasGroupBy = queryHasGroupBy; - } - - public boolean isAggFuncIsNotCount() { - return aggFuncIsNotCount; - } - - public void setAggFuncIsNotCount(boolean aggFuncIsNotCount) { - this.aggFuncIsNotCount = aggFuncIsNotCount; - } - - public Map getBaseToIdxTableMap() { - return baseToIdxTableMap; - } - - public void setAggFunction(String aggFunction) { - this.aggFunction = aggFunction; - } - - public String getAggFunction() { - return aggFunction; - } - - public void setSelClauseColsFetchException(boolean selClauseColsFetchException) { - this.selClauseColsFetchException = selClauseColsFetchException; - } - - public boolean isSelClauseColsFetchException() { - return selClauseColsFetchException; - } - - public String getAlias() { - return alias; - } - - public void setAlias(String alias) { - this.alias = alias; - } - - public String getBaseTableName() { - return baseTableName; - } - - 
public void setBaseTableName(String baseTableName) { - this.baseTableName = baseTableName; - } - - public String getIndexTableName() { - return indexTableName; - } - - public void setIndexTableName(String indexTableName) { - this.indexTableName = indexTableName; - } - - public ParseContext getParseContext() { - return parseContext; - } - - /** - * This method walks all the nodes starting from topOp TableScanOperator node - * and invokes methods from {@link RewriteCanApplyProcFactory} for each of the rules - * added to the opRules map. We use the {@link PreOrderOnceWalker} for a pre-order - * traversal of the operator tree. - * - * The methods from {@link RewriteCanApplyProcFactory} set appropriate values in - * {@link RewriteVars} enum. - * - * @param topOp - * @throws SemanticException - */ - void populateRewriteVars(TableScanOperator topOp) - throws SemanticException{ - Map opRules = new LinkedHashMap(); - //^TS%[(SEL%)|(FIL%)]*GRY%[(FIL%)]*RS%[(FIL%)]*GRY% - opRules.put( - new RuleRegExp("R1", TableScanOperator.getOperatorName() + "%[(" - + SelectOperator.getOperatorName() + "%)|(" + FilterOperator.getOperatorName() + "%)]*" - + GroupByOperator.getOperatorName() + "%[" + FilterOperator.getOperatorName() + "%]*" - + ReduceSinkOperator.getOperatorName() + "%[" + FilterOperator.getOperatorName() - + "%]*" + GroupByOperator.getOperatorName() + "%"), - RewriteCanApplyProcFactory.canApplyOnTableScanOperator(topOp)); - - // The dispatcher fires the processor corresponding to the closest matching - // rule and passes the context along - Dispatcher disp = new DefaultRuleDispatcher(getDefaultProc(), opRules, this); - GraphWalker ogw = new PreOrderOnceWalker(disp); - - // Create a list of topop nodes - List topNodes = new ArrayList(); - topNodes.add(topOp); - - try { - ogw.startWalking(topNodes, null); - } catch (SemanticException e) { - LOG.error("Exception in walking operator tree. Rewrite variables not populated"); - LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); - throw new SemanticException(e.getMessage(), e); - } - } - - - /** - * Default procedure for {@link DefaultRuleDispatcher}. - * @return - */ - private NodeProcessor getDefaultProc() { - return new NodeProcessor() { - @Override - public Object process(Node nd, Stack stack, - NodeProcessorCtx procCtx, Object... 
nodeOutputs) throws SemanticException { - return null; - } - }; - } - - - //Map for base table to index table mapping - //TableScan operator for base table will be modified to read from index table - private final Map baseToIdxTableMap = new HashMap();; - - public void addTable(String baseTableName, String indexTableName) { - baseToIdxTableMap.put(baseTableName, indexTableName); - } - - public String findBaseTable(String baseTableName) { - return baseToIdxTableMap.get(baseTableName); - } - - public String getIndexKey() { - return indexKey; - } - - public void setIndexKey(String indexKey) { - this.indexKey = indexKey; - } - - public TableScanOperator getTableScanOperator() { - return tableScanOperator; - } - - public void setTableScanOperator(TableScanOperator tableScanOperator) { - this.tableScanOperator = tableScanOperator; - } - - public List getSelectOperators() { - return selectOperators; - } - - public void setSelectOperators(List selectOperators) { - this.selectOperators = selectOperators; - } - - public List getGroupByOperators() { - return groupByOperators; - } - - public void setGroupByOperators(List groupByOperators) { - this.groupByOperators = groupByOperators; - } - - public void setAggParameterException(boolean aggParameterException) { - this.aggParameterException = aggParameterException; - } - - public boolean isAggParameterException() { - return aggParameterException; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyProcFactory.java deleted file mode 100644 index 41d228253a..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyProcFactory.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.optimizer.index; - -import org.apache.hadoop.hive.ql.exec.GroupByOperator; -import org.apache.hadoop.hive.ql.exec.Operator; -import org.apache.hadoop.hive.ql.exec.SelectOperator; -import org.apache.hadoop.hive.ql.exec.TableScanOperator; -import org.apache.hadoop.hive.ql.lib.Node; -import org.apache.hadoop.hive.ql.lib.NodeProcessor; -import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; -import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AggregationDesc; -import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; -import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; -import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils; -import org.apache.hadoop.hive.ql.plan.GroupByDesc; -import org.apache.hadoop.hive.ql.plan.OperatorDesc; - -import java.util.ArrayList; -import java.util.List; -import java.util.Stack; - -/** - * Factory of methods used by {@link RewriteGBUsingIndex} - * to determine if the rewrite optimization can be applied to the input query. - * - */ -public final class RewriteCanApplyProcFactory { - public static CheckTableScanProc canApplyOnTableScanOperator(TableScanOperator topOp) { - return new CheckTableScanProc(); - } - - private static class CheckTableScanProc implements NodeProcessor { - public CheckTableScanProc() { - } - - public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object... nodeOutputs) - throws SemanticException { - RewriteCanApplyCtx canApplyCtx = (RewriteCanApplyCtx) ctx; - for (Node node : stack) { - // For table scan operator, - // check ReferencedColumns to make sure that only the index column is - // selected for the following operators. - if (node instanceof TableScanOperator) { - TableScanOperator ts = (TableScanOperator) node; - canApplyCtx.setTableScanOperator(ts); - List selectColumns = ts.getConf().getReferencedColumns(); - if (selectColumns == null || selectColumns.size() != 1) { - canApplyCtx.setSelClauseColsFetchException(true); - return null; - } else { - canApplyCtx.setIndexKey(selectColumns.get(0)); - } - } else if (node instanceof SelectOperator) { - // For select operators in the stack, we just add them - if (canApplyCtx.getSelectOperators() == null) { - canApplyCtx.setSelectOperators(new ArrayList()); - } - canApplyCtx.getSelectOperators().add((SelectOperator) node); - } else if (node instanceof GroupByOperator) { - if (canApplyCtx.getGroupByOperators() == null) { - canApplyCtx.setGroupByOperators(new ArrayList()); - } - // According to the pre-order, - // the first GroupbyOperator is the one before RS - // and the second one is the one after RS - GroupByOperator operator = (GroupByOperator) node; - canApplyCtx.getGroupByOperators().add(operator); - if (!canApplyCtx.isQueryHasGroupBy()) { - canApplyCtx.setQueryHasGroupBy(true); - GroupByDesc conf = operator.getConf(); - List aggrList = conf.getAggregators(); - if (aggrList == null || aggrList.size() != 1 - || !("count".equals(aggrList.get(0).getGenericUDAFName()))) { - // In the current implementation, we make sure that only count is - // in the function - canApplyCtx.setAggFuncIsNotCount(true); - return null; - } else { - List para = aggrList.get(0).getParameters(); - if (para == null || para.size() == 0 || para.size() > 1) { - canApplyCtx.setAggParameterException(true); - return null; - } else { - ExprNodeDesc expr = ExprNodeDescUtils.backtrack(para.get(0), operator, - (Operator) stack.get(0)); - if (!(expr instanceof ExprNodeColumnDesc)) { - canApplyCtx.setAggParameterException(true); - 
return null; - } - } - } - } - } - } - return null; - } - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java deleted file mode 100644 index 3cb176e3c3..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java +++ /dev/null @@ -1,359 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.optimizer.index; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.ql.exec.TableScanOperator; -import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.index.AggregateIndexHandler; -import org.apache.hadoop.hive.ql.metadata.Hive; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.Partition; -import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.optimizer.IndexUtils; -import org.apache.hadoop.hive.ql.optimizer.Transform; -import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; - - -/** - * RewriteGBUsingIndex is implemented as one of the Rule-based Optimizations. - * Implements optimizations for GroupBy clause rewrite using aggregate index. - * This optimization rewrites GroupBy query over base table to the query over simple table-scan - * over index table, if there is index on the group by key(s) or the distinct column(s). - * E.g. - * - * select count(key) - * from table - * group by key; - * - * to - * - * select sum(_count_of_key) - * from idx_table - * group by key; - * - * - * The rewrite supports following queries: - *
- *   - Queries having only those col refs that are in the index key.
- *   - Queries that have index key col refs:
- *     - in SELECT
- *     - in WHERE
- *     - in GROUP BY
- *   - Queries with agg func COUNT(index key col ref) in SELECT
- *   - Queries with SELECT DISTINCT index_key_col_refs
- *   - Queries having a subquery satisfying above condition (only the subquery is rewritten)
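The count-to-sum rewrite described in the Javadoc above can be illustrated at the SQL-string level. The following standalone sketch mirrors the shape of the replacement query built in replaceGroupByOperatorProcess (later in this patch); the table and column names are hypothetical, and the real optimizer rewrites the operator tree rather than query text.

    // Sketch only: the "_count_of_<key>" column naming follows the aggregate
    // index handler convention referenced elsewhere in this patch.
    public final class GroupByIndexRewriteSketch {

      // "select count(<key>) ... group by <key>" over the base table becomes a
      // sum over the precomputed count column of the index table.
      static String rewriteCountToSum(String indexTableName, String indexKey) {
        String countColumn = "_count_of_" + indexKey;
        return "select sum(`" + countColumn + "`) from `" + indexTableName
            + "` group by " + indexKey;
      }

      public static void main(String[] args) {
        // Hypothetical names; any aggregate index on base_table(key) would do.
        System.out.println("original : select count(key) from base_table group by key");
        System.out.println("rewritten: " + rewriteCountToSum("idx_base_table_key", "key"));
      }
    }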
- * - * @see AggregateIndexHandler - * @see IndexUtils - * @see RewriteCanApplyCtx - * @see RewriteCanApplyProcFactory - * @see RewriteParseContextGenerator - * @see RewriteQueryUsingAggregateIndexCtx - * @see RewriteQueryUsingAggregateIndex - * For test cases, @see ql_rewrite_gbtoidx.q - */ - -public class RewriteGBUsingIndex extends Transform { - private ParseContext parseContext; - // Assumes one instance of this + single-threaded compilation for each query. - private Hive hiveDb; - private HiveConf hiveConf; - private static final Logger LOG = LoggerFactory.getLogger(RewriteGBUsingIndex.class.getName()); - - /* - * Stores the list of top TableScanOperator names for which the rewrite - * can be applied and the action that needs to be performed for operator tree - * starting from this TableScanOperator - */ - private final Map tsOpToProcess = - new LinkedHashMap(); - - //Index Validation Variables - private static final String IDX_BUCKET_COL = "_bucketname"; - private static final String IDX_OFFSETS_ARRAY_COL = "_offsets"; - - - @Override - public ParseContext transform(ParseContext pctx) throws SemanticException { - parseContext = pctx; - hiveConf = parseContext.getConf(); - try { - hiveDb = Hive.get(hiveConf); - } catch (HiveException e) { - LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); - throw new SemanticException(e.getMessage(), e); - } - - // Don't try to index optimize the query to build the index - HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTINDEXFILTER, false); - - /* Check if the input query passes all the tests to be eligible for a rewrite - * If yes, rewrite original query; else, return the current parseContext - */ - if (shouldApplyOptimization()) { - LOG.info("Rewriting Original Query using " + getName() + " optimization."); - rewriteOriginalQuery(); - } - return parseContext; - } - - private String getName() { - return "RewriteGBUsingIndex"; - } - - /** - * We traverse the current operator tree to check for conditions in which the - * optimization cannot be applied. - * - * At the end, we check if all conditions have passed for rewrite. If yes, we - * determine if the the index is usable for rewrite. Else, we log the condition which - * did not meet the rewrite criterion. - * - * @return - * @throws SemanticException - */ - boolean shouldApplyOptimization() throws SemanticException { - Map> tableToIndex = getIndexesForRewrite(); - if (tableToIndex.isEmpty()) { - LOG.debug("No Valid Index Found to apply Rewrite, " + - "skipping " + getName() + " optimization"); - return false; - } - /* - * This code iterates over each TableScanOperator from the topOps map from ParseContext. - * For each operator tree originating from this top TableScanOperator, we determine - * if the optimization can be applied. If yes, we add the name of the top table to - * the tsOpToProcess to apply rewrite later on. - * */ - for (Map.Entry entry : parseContext.getTopOps().entrySet()) { - String alias = entry.getKey(); - TableScanOperator topOp = entry.getValue(); - Table table = topOp.getConf().getTableMetadata(); - List indexes = tableToIndex.get(table); - if (indexes.isEmpty()) { - continue; - } - if (table.isPartitioned()) { - //if base table has partitions, we need to check if index is built for - //all partitions. 
If not, then we do not apply the optimization - if (!checkIfIndexBuiltOnAllTablePartitions(topOp, indexes)) { - LOG.debug("Index is not built for all table partitions, " + - "skipping " + getName() + " optimization"); - continue; - } - } - //check if rewrite can be applied for operator tree - //if there are no partitions on base table - checkIfRewriteCanBeApplied(alias, topOp, table, indexes); - } - return !tsOpToProcess.isEmpty(); - } - - /** - * This methods checks if rewrite can be applied using the index and also - * verifies all conditions of the operator tree. - * - * @param topOp - TableScanOperator for a single the operator tree branch - * @param indexes - Map of a table and list of indexes on it - * @return - true if rewrite can be applied on the current branch; false otherwise - * @throws SemanticException - */ - private boolean checkIfRewriteCanBeApplied(String alias, TableScanOperator topOp, - Table baseTable, List indexes) throws SemanticException{ - //Context for checking if this optimization can be applied to the input query - RewriteCanApplyCtx canApplyCtx = RewriteCanApplyCtx.getInstance(parseContext); - canApplyCtx.setAlias(alias); - canApplyCtx.setBaseTableName(baseTable.getTableName()); - canApplyCtx.populateRewriteVars(topOp); - Map indexTableMap = getIndexToKeysMap(indexes); - for (Map.Entry entry : indexTableMap.entrySet()) { - //we rewrite the original query using the first valid index encountered - //this can be changed if we have a better mechanism to - //decide which index will produce a better rewrite - Index index = entry.getKey(); - String indexKeyName = entry.getValue(); - //break here if any valid index is found to apply rewrite - if (canApplyCtx.getIndexKey() != null && canApplyCtx.getIndexKey().equals(indexKeyName) - && checkIfAllRewriteCriteriaIsMet(canApplyCtx)) { - canApplyCtx.setAggFunction("_count_of_" + indexKeyName + ""); - canApplyCtx.addTable(canApplyCtx.getBaseTableName(), index.getIndexTableName()); - canApplyCtx.setIndexTableName(index.getIndexTableName()); - tsOpToProcess.put(alias, canApplyCtx); - return true; - } - } - return false; - } - - /** - * Get a list of indexes which can be used for rewrite. - * @return - * @throws SemanticException - */ - private Map> getIndexesForRewrite() throws SemanticException{ - List supportedIndexes = new ArrayList(); - supportedIndexes.add(AggregateIndexHandler.class.getName()); - - // query the metastore to know what columns we have indexed - Collection topTables = parseContext.getTopOps().values(); - Map> indexes = new HashMap>(); - for (TableScanOperator op : topTables) { - TableScanOperator tsOP = op; - List tblIndexes = IndexUtils.getIndexes(tsOP.getConf().getTableMetadata(), - supportedIndexes); - if (tblIndexes.size() > 0) { - indexes.put(tsOP.getConf().getTableMetadata(), tblIndexes); - } - } - - return indexes; - } - - /** - * This method checks if the index is built on all partitions of the base - * table. If not, then the method returns false as we do not apply optimization - * for this case. 
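The partition check described here boils down to a set-coverage test; the deleted code delegates it to IndexUtils.checkPartitionsCoveredByIndex, but a simplified standalone model (partition spec strings are hypothetical) behaves like this:

    import java.util.Set;

    // Simplified model: the rewrite is only considered when the query reads at
    // least one partition and every partition it reads also has the index built.
    final class PartitionCoverageSketch {
      static boolean indexCoversQuery(Set<String> queryPartitions,
                                      Set<String> indexedPartitions) {
        return !queryPartitions.isEmpty()
            && indexedPartitions.containsAll(queryPartitions);
      }

      public static void main(String[] args) {
        Set<String> query = Set.of("ds=2018-01-01", "ds=2018-01-02");
        Set<String> indexed = Set.of("ds=2018-01-01", "ds=2018-01-02", "ds=2018-01-03");
        System.out.println(indexCoversQuery(query, indexed)); // true
      }
    }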
- * @param tableScan - * @param indexes - * @return - * @throws SemanticException - */ - private boolean checkIfIndexBuiltOnAllTablePartitions(TableScanOperator tableScan, - List indexes) throws SemanticException { - // check if we have indexes on all partitions in this table scan - Set queryPartitions; - try { - queryPartitions = IndexUtils.checkPartitionsCoveredByIndex(tableScan, parseContext, indexes); - if (queryPartitions == null) { // partitions not covered - return false; - } - } catch (HiveException e) { - LOG.error("Fatal Error: problem accessing metastore", e); - throw new SemanticException(e); - } - if (queryPartitions.size() != 0) { - return true; - } - return false; - } - - /** - * This code block iterates over indexes on the table and populates the indexToKeys map - * for all the indexes that satisfy the rewrite criteria. - * @param indexTables - * @return - * @throws SemanticException - */ - Map getIndexToKeysMap(List indexTables) throws SemanticException{ - Hive hiveInstance = hiveDb; - Map indexToKeysMap = new LinkedHashMap(); - for (int idxCtr = 0; idxCtr < indexTables.size(); idxCtr++) { - Index index = indexTables.get(idxCtr); - //Getting index key columns - StorageDescriptor sd = index.getSd(); - List idxColList = sd.getCols(); - assert idxColList.size()==1; - String indexKeyName = idxColList.get(0).getName(); - // Check that the index schema is as expected. This code block should - // catch problems of this rewrite breaking when the AggregateIndexHandler - // index is changed. - List idxTblColNames = new ArrayList(); - try { - String[] qualified = Utilities.getDbTableName(index.getDbName(), - index.getIndexTableName()); - Table idxTbl = hiveInstance.getTable(qualified[0], qualified[1]); - for (FieldSchema idxTblCol : idxTbl.getCols()) { - idxTblColNames.add(idxTblCol.getName()); - } - } catch (HiveException e) { - LOG.error("Got exception while locating index table, " + - "skipping " + getName() + " optimization"); - LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); - throw new SemanticException(e.getMessage(), e); - } - assert(idxTblColNames.contains(IDX_BUCKET_COL)); - assert(idxTblColNames.contains(IDX_OFFSETS_ARRAY_COL)); - // we add all index tables which can be used for rewrite - // and defer the decision of using a particular index for later - // this is to allow choosing a index if a better mechanism is - // designed later to chose a better rewrite - indexToKeysMap.put(index, indexKeyName); - } - return indexToKeysMap; - } - - /** - * Method to rewrite the input query if all optimization criteria is passed. - * The method iterates over the tsOpToProcess {@link ArrayList} to apply the rewrites - * @throws SemanticException - * - */ - private void rewriteOriginalQuery() throws SemanticException { - for (RewriteCanApplyCtx canApplyCtx : tsOpToProcess.values()) { - RewriteQueryUsingAggregateIndexCtx rewriteQueryCtx = - RewriteQueryUsingAggregateIndexCtx.getInstance(parseContext, hiveDb, canApplyCtx); - rewriteQueryCtx.invokeRewriteQueryProc(); - parseContext = rewriteQueryCtx.getParseContext(); - } - LOG.info("Finished Rewriting query"); - } - - - /** - * This method logs the reason for which we cannot apply the rewrite optimization. 
- * @return - */ - boolean checkIfAllRewriteCriteriaIsMet(RewriteCanApplyCtx canApplyCtx) { - if (canApplyCtx.isSelClauseColsFetchException()) { - LOG.debug("Got exception while locating child col refs for select list, " + "skipping " - + getName() + " optimization."); - return false; - } - if (canApplyCtx.isAggFuncIsNotCount()) { - LOG.debug("Agg func other than count is " + "not supported by " + getName() - + " optimization."); - return false; - } - if (canApplyCtx.isAggParameterException()) { - LOG.debug("Got exception while locating parameter refs for aggregation, " + "skipping " - + getName() + " optimization."); - return false; - } - return true; - } -} - diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteParseContextGenerator.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteParseContextGenerator.java deleted file mode 100644 index 2a01d29b5f..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteParseContextGenerator.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.optimizer.index; - -import java.io.IOException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.Context; -import org.apache.hadoop.hive.ql.QueryState; -import org.apache.hadoop.hive.ql.exec.Operator; -import org.apache.hadoop.hive.ql.parse.ASTNode; -import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; -import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.ParseDriver; -import org.apache.hadoop.hive.ql.parse.ParseException; -import org.apache.hadoop.hive.ql.parse.ParseUtils; -import org.apache.hadoop.hive.ql.parse.QB; -import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer; -import org.apache.hadoop.hive.ql.parse.SemanticAnalyzerFactory; -import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.OperatorDesc; - - -/** - * RewriteParseContextGenerator is a class that offers methods to generate operator tree - * for input queries. It is implemented on lines of the analyzeInternal(..) method - * of {@link SemanticAnalyzer} but it creates only the ParseContext for the input query command. - * It does not optimize or generate map-reduce tasks for the input query. - * This can be used when you need to create operator tree for an internal query. - * - */ -public final class RewriteParseContextGenerator { - - private static final Logger LOG = LoggerFactory.getLogger(RewriteParseContextGenerator.class.getName()); - - /** - * Parse the input {@link String} command and generate an operator tree. 
- * @param conf - * @param command - * @throws SemanticException - */ - public static Operator generateOperatorTree(QueryState queryState, - String command) throws SemanticException { - Operator operatorTree; - try { - Context ctx = new Context(queryState.getConf()); - ASTNode tree = ParseUtils.parse(command, ctx); - - BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(queryState, tree); - assert(sem instanceof SemanticAnalyzer); - operatorTree = doSemanticAnalysis((SemanticAnalyzer) sem, tree, ctx); - LOG.info("Sub-query Semantic Analysis Completed"); - } catch (IOException e) { - LOG.error("IOException in generating the operator " + - "tree for input command - " + command + " " , e); - LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); - throw new SemanticException(e.getMessage(), e); - } catch (ParseException e) { - LOG.error("ParseException in generating the operator " + - "tree for input command - " + command + " " , e); - LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); - throw new SemanticException(e.getMessage(), e); - } catch (SemanticException e) { - LOG.error("SemanticException in generating the operator " + - "tree for input command - " + command + " " , e); - LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); - throw new SemanticException(e.getMessage(), e); - } - return operatorTree; - } - - /** - * For the input ASTNode tree, perform a semantic analysis and check metadata - * Generate a operator tree and return it. - * - * @param ctx - * @param sem - * @param ast - * @return - * @throws SemanticException - */ - private static Operator doSemanticAnalysis(SemanticAnalyzer sem, - ASTNode ast, Context ctx) throws SemanticException { - QB qb = new QB(null, null, false); - ASTNode child = ast; - ParseContext subPCtx = sem.getParseContext(); - subPCtx.setContext(ctx); - sem.initParseCtx(subPCtx); - - LOG.info("Starting Sub-query Semantic Analysis"); - sem.doPhase1(child, qb, sem.initPhase1Ctx(), null); - LOG.info("Completed phase 1 of Sub-query Semantic Analysis"); - - sem.getMetaData(qb); - LOG.info("Completed getting MetaData in Sub-query Semantic Analysis"); - - LOG.info("Sub-query Abstract syntax tree: " + ast.toStringTree()); - Operator operator = sem.genPlan(qb); - - LOG.info("Sub-query Completed plan generation"); - return operator; - } - -} diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java deleted file mode 100644 index 658422cfa0..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java +++ /dev/null @@ -1,325 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.optimizer.index; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; - -import org.apache.hadoop.hive.ql.optimizer.FieldNode; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.ql.exec.ColumnInfo; -import org.apache.hadoop.hive.ql.exec.FunctionRegistry; -import org.apache.hadoop.hive.ql.exec.GroupByOperator; -import org.apache.hadoop.hive.ql.exec.Operator; -import org.apache.hadoop.hive.ql.exec.OperatorUtils; -import org.apache.hadoop.hive.ql.exec.RowSchema; -import org.apache.hadoop.hive.ql.exec.SelectOperator; -import org.apache.hadoop.hive.ql.exec.TableScanOperator; -import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; -import org.apache.hadoop.hive.ql.metadata.Hive; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.optimizer.ColumnPrunerProcFactory; -import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AggregationDesc; -import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; -import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; -import org.apache.hadoop.hive.ql.plan.GroupByDesc; -import org.apache.hadoop.hive.ql.plan.TableScanDesc; -import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; -import org.apache.hadoop.hive.serde2.SerDeException; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.StructField; -import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; -import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; - -/** - * RewriteQueryUsingAggregateIndexCtx class stores the - * context for the {@link RewriteQueryUsingAggregateIndex} - * used to rewrite operator plan with index table instead of base table. - */ - -public final class RewriteQueryUsingAggregateIndexCtx implements NodeProcessorCtx { - private static final Logger LOG = LoggerFactory.getLogger(RewriteQueryUsingAggregateIndexCtx.class.getName()); - private RewriteQueryUsingAggregateIndexCtx(ParseContext parseContext, Hive hiveDb, - RewriteCanApplyCtx canApplyCtx) { - this.parseContext = parseContext; - this.hiveDb = hiveDb; - this.canApplyCtx = canApplyCtx; - this.indexTableName = canApplyCtx.getIndexTableName(); - this.alias = canApplyCtx.getAlias(); - this.aggregateFunction = canApplyCtx.getAggFunction(); - this.indexKey = canApplyCtx.getIndexKey(); - } - - public static RewriteQueryUsingAggregateIndexCtx getInstance(ParseContext parseContext, - Hive hiveDb, RewriteCanApplyCtx canApplyCtx) { - return new RewriteQueryUsingAggregateIndexCtx( - parseContext, hiveDb, canApplyCtx); - } - - // Assumes one instance of this + single-threaded compilation for each query. 
- private final Hive hiveDb; - private final ParseContext parseContext; - private final RewriteCanApplyCtx canApplyCtx; - //We need the GenericUDAFEvaluator for GenericUDAF function "sum" - private GenericUDAFEvaluator eval = null; - private final String indexTableName; - private final String alias; - private final String aggregateFunction; - private ExprNodeColumnDesc aggrExprNode = null; - private String indexKey; - - public ParseContext getParseContext() { - return parseContext; - } - - public Hive getHiveDb() { - return hiveDb; - } - - public String getIndexName() { - return indexTableName; - } - - public GenericUDAFEvaluator getEval() { - return eval; - } - - public void setEval(GenericUDAFEvaluator eval) { - this.eval = eval; - } - - public void setAggrExprNode(ExprNodeColumnDesc aggrExprNode) { - this.aggrExprNode = aggrExprNode; - } - - public ExprNodeColumnDesc getAggrExprNode() { - return aggrExprNode; - } - - public String getAlias() { - return alias; - } - - public String getAggregateFunction() { - return aggregateFunction; - } - - public String getIndexKey() { - return indexKey; - } - - public void setIndexKey(String indexKey) { - this.indexKey = indexKey; - } - - public void invokeRewriteQueryProc() throws SemanticException { - this.replaceTableScanProcess(canApplyCtx.getTableScanOperator()); - //We need aggrExprNode. Thus, replaceGroupByOperatorProcess should come before replaceSelectOperatorProcess - for (int index = 0; index < canApplyCtx.getGroupByOperators().size(); index++) { - this.replaceGroupByOperatorProcess(canApplyCtx.getGroupByOperators().get(index), index); - } - for (SelectOperator selectperator : canApplyCtx.getSelectOperators()) { - this.replaceSelectOperatorProcess(selectperator); - } - } - - /** - * This method replaces the original TableScanOperator with the new - * TableScanOperator and metadata that scans over the index table rather than - * scanning over the original table. 
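Most of replaceTableScanProcess (below) is metadata plumbing, but the alias handling is easy to isolate: everything up to the last ':' of the old alias is preserved and the trailing table name is swapped for the index table. A standalone sketch with hypothetical alias values:

    // Mirrors the lastIndexOf(":") alias rewrite used when the TableScanOperator
    // is repointed from the base table to the index table.
    final class ScanAliasRewriteSketch {
      static String newAlias(String oldAlias, String indexTableName) {
        int idx = oldAlias.lastIndexOf(':');
        return idx >= 0
            ? oldAlias.substring(0, idx) + ":" + indexTableName
            : indexTableName;
      }

      public static void main(String[] args) {
        System.out.println(newAlias("subq:base_table", "idx_base_table_key"));
        System.out.println(newAlias("base_table", "idx_base_table_key"));
      }
    }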
- * - */ - private void replaceTableScanProcess(TableScanOperator scanOperator) throws SemanticException { - RewriteQueryUsingAggregateIndexCtx rewriteQueryCtx = this; - String alias = rewriteQueryCtx.getAlias(); - - // Need to remove the original TableScanOperators from these data structures - // and add new ones - HashMap topOps = rewriteQueryCtx.getParseContext() - .getTopOps(); - - // remove original TableScanOperator - topOps.remove(alias); - - String indexTableName = rewriteQueryCtx.getIndexName(); - Table indexTableHandle = null; - try { - indexTableHandle = rewriteQueryCtx.getHiveDb().getTable(indexTableName); - } catch (HiveException e) { - LOG.error("Error while getting the table handle for index table."); - LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); - throw new SemanticException(e.getMessage(), e); - } - - // construct a new descriptor for the index table scan - TableScanDesc indexTableScanDesc = new TableScanDesc(indexTableHandle); - indexTableScanDesc.setGatherStats(false); - - String k = org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(indexTableName) + Path.SEPARATOR; - indexTableScanDesc.setStatsAggPrefix(k); - scanOperator.setConf(indexTableScanDesc); - - // Construct the new RowResolver for the new TableScanOperator - ArrayList sigRS = new ArrayList(); - try { - StructObjectInspector rowObjectInspector = (StructObjectInspector) indexTableHandle - .getDeserializer().getObjectInspector(); - StructField field = rowObjectInspector.getStructFieldRef(rewriteQueryCtx.getIndexKey()); - sigRS.add(new ColumnInfo(field.getFieldName(), TypeInfoUtils.getTypeInfoFromObjectInspector( - field.getFieldObjectInspector()), indexTableName, false)); - } catch (SerDeException e) { - LOG.error("Error while creating the RowResolver for new TableScanOperator."); - LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); - throw new SemanticException(e.getMessage(), e); - } - RowSchema rs = new RowSchema(sigRS); - - // Set row resolver for new table - String newAlias = indexTableName; - int index = alias.lastIndexOf(":"); - if (index >= 0) { - newAlias = alias.substring(0, index) + ":" + indexTableName; - } - - // Scan operator now points to other table - scanOperator.getConf().setAlias(newAlias); - scanOperator.setAlias(indexTableName); - topOps.put(newAlias, scanOperator); - rewriteQueryCtx.getParseContext().setTopOps(topOps); - - ColumnPrunerProcFactory.setupNeededColumns(scanOperator, rs, - Arrays.asList(new FieldNode(rewriteQueryCtx.getIndexKey()))); - } - - /** - * This method replaces the original SelectOperator with the new - * SelectOperator with a new column indexed_key_column. 
- */ - private void replaceSelectOperatorProcess(SelectOperator operator) throws SemanticException { - RewriteQueryUsingAggregateIndexCtx rewriteQueryCtx = this; - // we need to set the colList, outputColumnNames, colExprMap, - // rowSchema for only that SelectOperator which precedes the GroupByOperator - // count(indexed_key_column) needs to be replaced by - // sum(`_count_of_indexed_key_column`) - List selColList = operator.getConf().getColList(); - selColList.add(rewriteQueryCtx.getAggrExprNode()); - - List selOutputColNames = operator.getConf().getOutputColumnNames(); - selOutputColNames.add(rewriteQueryCtx.getAggrExprNode().getColumn()); - - operator.getColumnExprMap().put(rewriteQueryCtx.getAggrExprNode().getColumn(), - rewriteQueryCtx.getAggrExprNode()); - - RowSchema selRS = operator.getSchema(); - List selRSSignature = selRS.getSignature(); - // Need to create a new type for Column[_count_of_indexed_key_column] node - PrimitiveTypeInfo pti = TypeInfoFactory.getPrimitiveTypeInfo("bigint"); - pti.setTypeName("bigint"); - ColumnInfo newCI = new ColumnInfo(rewriteQueryCtx.getAggregateFunction(), pti, "", false); - selRSSignature.add(newCI); - selRS.setSignature((ArrayList) selRSSignature); - operator.setSchema(selRS); - } - - /** - * We need to replace the count(indexed_column_key) GenericUDAF aggregation - * function for group-by construct to "sum" GenericUDAF. This method creates a - * new operator tree for a sample query that creates a GroupByOperator with - * sum aggregation function and uses that GroupByOperator information to - * replace the original GroupByOperator aggregation information. It replaces - * the AggregationDesc (aggregation descriptor) of the old GroupByOperator - * with the new Aggregation Desc of the new GroupByOperator. 
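The swap from count to sum is semantically safe because the aggregate index already stores per-key row counts, and summing those counts per key reproduces count(key) ... group by key over the base rows. A small numeric check of that identity, with hypothetical data split across two index partitions:

    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    // count(key) group by key over the base rows equals
    // sum(_count_of_key) group by key over the aggregate-index rows.
    final class CountToSumEquivalenceSketch {
      record IndexRow(String key, long countOfKey) {}

      public static void main(String[] args) {
        List<String> baseRows = List.of("a", "a", "b", "a", "b", "c");

        Map<String, Long> direct = baseRows.stream()
            .collect(Collectors.groupingBy(k -> k, Collectors.counting()));

        // Hypothetical index contents for the same data, one row per
        // (key, partition): a -> 2 + 1, b -> 1 + 1, c -> 1.
        List<IndexRow> indexRows = List.of(
            new IndexRow("a", 2), new IndexRow("b", 1),                        // partition 1
            new IndexRow("a", 1), new IndexRow("b", 1), new IndexRow("c", 1)); // partition 2

        Map<String, Long> viaIndex = indexRows.stream()
            .collect(Collectors.groupingBy(IndexRow::key,
                Collectors.summingLong(IndexRow::countOfKey)));

        System.out.println(direct.equals(viaIndex)); // true
      }
    }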
- * @return - */ - private void replaceGroupByOperatorProcess(GroupByOperator operator, int index) - throws SemanticException { - RewriteQueryUsingAggregateIndexCtx rewriteQueryCtx = this; - - // We need to replace the GroupByOperator which is before RS - if (index == 0) { - // the query contains the sum aggregation GenericUDAF - String selReplacementCommand = "select sum(`" + rewriteQueryCtx.getAggregateFunction() + "`)" - + " from `" + rewriteQueryCtx.getIndexName() + "` group by " - + rewriteQueryCtx.getIndexKey() + " "; - // retrieve the operator tree for the query, and the required GroupByOperator from it - Operator newOperatorTree = RewriteParseContextGenerator.generateOperatorTree( - rewriteQueryCtx.getParseContext().getQueryState(), - selReplacementCommand); - - // we get our new GroupByOperator here - GroupByOperator newGbyOperator = OperatorUtils.findLastOperatorUpstream( - newOperatorTree, GroupByOperator.class); - if (newGbyOperator == null) { - throw new SemanticException("Error replacing GroupBy operator."); - } - - // we need this information to set the correct colList, outputColumnNames - // in SelectOperator - ExprNodeColumnDesc aggrExprNode = null; - - // Construct the new AggregationDesc to get rid of the current - // internal names and replace them with new internal names - // as required by the operator tree - GroupByDesc newConf = newGbyOperator.getConf(); - List newAggrList = newConf.getAggregators(); - if (newAggrList != null && newAggrList.size() > 0) { - for (AggregationDesc aggregationDesc : newAggrList) { - rewriteQueryCtx.setEval(aggregationDesc.getGenericUDAFEvaluator()); - aggrExprNode = (ExprNodeColumnDesc) aggregationDesc.getParameters().get(0); - rewriteQueryCtx.setAggrExprNode(aggrExprNode); - } - } - - // Now the GroupByOperator has the new AggregationList; - // sum(`_count_of_indexed_key`) - // instead of count(indexed_key) - GroupByDesc oldConf = operator.getConf(); - oldConf.setAggregators((ArrayList) newAggrList); - operator.setConf(oldConf); - - } else { - // we just need to reset the GenericUDAFEvaluator and its name for this - // GroupByOperator whose parent is the ReduceSinkOperator - GroupByDesc childConf = operator.getConf(); - List childAggrList = childConf.getAggregators(); - if (childAggrList != null && childAggrList.size() > 0) { - for (AggregationDesc aggregationDesc : childAggrList) { - List paraList = aggregationDesc.getParameters(); - List parametersOIList = new ArrayList(); - for (ExprNodeDesc expr : paraList) { - parametersOIList.add(expr.getWritableObjectInspector()); - } - GenericUDAFEvaluator evaluator = FunctionRegistry.getGenericUDAFEvaluator("sum", - parametersOIList, false, false); - aggregationDesc.setGenericUDAFEvaluator(evaluator); - aggregationDesc.setGenericUDAFName("sum"); - } - } - } - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/IndexWhereResolver.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/IndexWhereResolver.java deleted file mode 100644 index d204fe863e..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/IndexWhereResolver.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.optimizer.physical; - -import java.util.ArrayList; - -import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker; -import org.apache.hadoop.hive.ql.lib.Dispatcher; -import org.apache.hadoop.hive.ql.lib.GraphWalker; -import org.apache.hadoop.hive.ql.lib.Node; -import org.apache.hadoop.hive.ql.optimizer.physical.index.IndexWhereTaskDispatcher; -import org.apache.hadoop.hive.ql.parse.SemanticException; - -public class IndexWhereResolver implements PhysicalPlanResolver { - - @Override - public PhysicalContext resolve(PhysicalContext physicalContext) throws SemanticException { - Dispatcher dispatcher = new IndexWhereTaskDispatcher(physicalContext); - GraphWalker opGraphWalker = new DefaultGraphWalker(dispatcher); - ArrayList topNodes = new ArrayList(); - topNodes.addAll(physicalContext.getRootTasks()); - opGraphWalker.startWalking(topNodes, null); - - return physicalContext; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java index a64a498ca6..0f3c5f22f1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java @@ -60,9 +60,6 @@ private void initialize(HiveConf hiveConf) { } } - if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER)) { - resolvers.add(new IndexWhereResolver()); - } resolvers.add(new MapJoinResolver()); if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVEMETADATAONLYQUERIES)) { resolvers.add(new MetadataOnlyOptimizer()); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcCtx.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcCtx.java deleted file mode 100644 index 179d4c2ff9..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcCtx.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.optimizer.physical.index; - -import java.io.Serializable; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; -import org.apache.hadoop.hive.ql.parse.ParseContext; - -public class IndexWhereProcCtx implements NodeProcessorCtx { - - private static final Logger LOG = LoggerFactory.getLogger(IndexWhereProcCtx.class.getName()); - - private final Task currentTask; - private final ParseContext parseCtx; - - public IndexWhereProcCtx(Task task, ParseContext parseCtx) { - this.currentTask = task; - this.parseCtx = parseCtx; - } - - public ParseContext getParseContext() { - return parseCtx; - } - - public Task getCurrentTask() { - return currentTask; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcessor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcessor.java deleted file mode 100644 index b284afa2d7..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcessor.java +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.optimizer.physical.index; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.Stack; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.fs.ContentSummary; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.ql.exec.TableScanOperator; -import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; -import org.apache.hadoop.hive.ql.hooks.ReadEntity; -import org.apache.hadoop.hive.ql.index.HiveIndexHandler; -import org.apache.hadoop.hive.ql.index.HiveIndexQueryContext; -import org.apache.hadoop.hive.ql.lib.Node; -import org.apache.hadoop.hive.ql.lib.NodeProcessor; -import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.HiveUtils; -import org.apache.hadoop.hive.ql.metadata.Partition; -import org.apache.hadoop.hive.ql.optimizer.IndexUtils; -import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; -import org.apache.hadoop.hive.ql.plan.MapWork; -import org.apache.hadoop.hive.ql.plan.MapredWork; -import org.apache.hadoop.hive.ql.plan.TableDesc; -import org.apache.hadoop.hive.ql.plan.TableScanDesc; - -/** -* -* IndexWhereProcessor. -* Processes Operator Nodes to look for WHERE queries with a predicate column -* on which we have an index. Creates an index subquery Task for these -* WHERE queries to use the index automatically. -*/ -public class IndexWhereProcessor implements NodeProcessor { - - private static final Logger LOG = LoggerFactory.getLogger(IndexWhereProcessor.class.getName()); - private final Map> tsToIndices; - - public IndexWhereProcessor(Map> tsToIndices) { - super(); - this.tsToIndices = tsToIndices; - } - - @Override - /** - * Process a node of the operator tree. This matches on the rule in IndexWhereTaskDispatcher - */ - public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... 
nodeOutputs) throws SemanticException { - - TableScanOperator operator = (TableScanOperator) nd; - List opChildren = operator.getChildren(); - TableScanDesc operatorDesc = operator.getConf(); - if (operatorDesc == null || !tsToIndices.containsKey(operator)) { - return null; - } - List indexes = tsToIndices.get(operator); - - ExprNodeDesc predicate = operatorDesc.getFilterExpr(); - - IndexWhereProcCtx context = (IndexWhereProcCtx) procCtx; - ParseContext pctx = context.getParseContext(); - LOG.info("Processing predicate for index optimization"); - - if (predicate == null) { - LOG.info("null predicate pushed down"); - return null; - } - LOG.info(predicate.getExprString()); - - // check if we have tsToIndices on all partitions in this table scan - Set queryPartitions; - try { - queryPartitions = IndexUtils.checkPartitionsCoveredByIndex(operator, pctx, indexes); - if (queryPartitions == null) { // partitions not covered - return null; - } - } catch (HiveException e) { - LOG.error("Fatal Error: problem accessing metastore", e); - throw new SemanticException(e); - } - - // we can only process MapReduce tasks to check input size - if (!context.getCurrentTask().isMapRedTask()) { - return null; - } - MapRedTask currentTask = (MapRedTask) context.getCurrentTask(); - - // get potential reentrant index queries from each index - Map queryContexts = new HashMap(); - // make sure we have an index on the table being scanned - TableDesc tblDesc = operator.getTableDescSkewJoin(); - - Map> indexesByType = new HashMap>(); - for (Index indexOnTable : indexes) { - if (indexesByType.get(indexOnTable.getIndexHandlerClass()) == null) { - List newType = new ArrayList(); - newType.add(indexOnTable); - indexesByType.put(indexOnTable.getIndexHandlerClass(), newType); - } else { - indexesByType.get(indexOnTable.getIndexHandlerClass()).add(indexOnTable); - } - } - - // choose index type with most tsToIndices of the same type on the table - // TODO HIVE-2130 This would be a good place for some sort of cost based choice? - List bestIndexes = indexesByType.values().iterator().next(); - for (List indexTypes : indexesByType.values()) { - if (bestIndexes.size() < indexTypes.size()) { - bestIndexes = indexTypes; - } - } - - // rewrite index queries for the chosen index type - HiveIndexQueryContext tmpQueryContext = new HiveIndexQueryContext(); - tmpQueryContext.setQueryPartitions(queryPartitions); - rewriteForIndexes(predicate, bestIndexes, pctx, currentTask, tmpQueryContext); - List> indexTasks = tmpQueryContext.getQueryTasks(); - - if (indexTasks != null && indexTasks.size() > 0) { - queryContexts.put(bestIndexes.get(0), tmpQueryContext); - } - // choose an index rewrite to use - if (queryContexts.size() > 0) { - // TODO HIVE-2130 This would be a good place for some sort of cost based choice? 
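As the TODO above notes, there is no cost model here: the handler class with the most indexes on the table simply wins. A standalone sketch of that grouping step, over plain handler-class names (values hypothetical):

    import java.util.Comparator;
    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    // Group the table's indexes by handler class and keep the largest group;
    // there is no cost-based choice, only a size comparison.
    final class IndexTypeChoiceSketch {
      static List<String> chooseLargestGroup(List<String> handlerClasses) {
        Map<String, List<String>> byType = handlerClasses.stream()
            .collect(Collectors.groupingBy(h -> h));
        return byType.values().stream()
            .max(Comparator.comparingInt(List::size))
            .orElse(List.of());
      }

      public static void main(String[] args) {
        List<String> handlers = List.of(
            "CompactIndexHandler", "BitmapIndexHandler", "CompactIndexHandler");
        System.out.println(chooseLargestGroup(handlers)); // the two compact entries
      }
    }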
- Index chosenIndex = queryContexts.keySet().iterator().next(); - - // modify the parse context to use indexing - // we need to delay this until we choose one index so that we don't attempt to modify pctx multiple times - HiveIndexQueryContext queryContext = queryContexts.get(chosenIndex); - - // prepare the map reduce job to use indexing - MapWork work = currentTask.getWork().getMapWork(); - work.setInputformat(queryContext.getIndexInputFormat()); - work.addIndexIntermediateFile(queryContext.getIndexIntermediateFile()); - // modify inputs based on index query - Set inputs = pctx.getSemanticInputs(); - inputs.addAll(queryContext.getAdditionalSemanticInputs()); - List> chosenRewrite = queryContext.getQueryTasks(); - - // add dependencies so index query runs first - insertIndexQuery(pctx, context, chosenRewrite); - } - - return null; - } - - /** - * Get a list of Tasks to activate use of tsToIndices. - * Generate the tasks for the index query (where we store results of - * querying the index in a tmp file) inside the IndexHandler - * @param predicate Predicate of query to rewrite - * @param index Index to use for rewrite - * @param pctx - * @param task original task before rewrite - * @param queryContext stores return values - */ - private void rewriteForIndexes(ExprNodeDesc predicate, List indexes, - ParseContext pctx, Task task, - HiveIndexQueryContext queryContext) - throws SemanticException { - HiveIndexHandler indexHandler; - // All tsToIndices in the list are of the same type, and therefore can use the - // same handler to generate the index query tasks - Index index = indexes.get(0); - try { - indexHandler = HiveUtils.getIndexHandler(pctx.getConf(), index.getIndexHandlerClass()); - } catch (HiveException e) { - LOG.error("Exception while loading IndexHandler: " + index.getIndexHandlerClass(), e); - throw new SemanticException("Failed to load indexHandler: " + index.getIndexHandlerClass(), e); - } - - // check the size - try { - ContentSummary inputSummary = Utilities.getInputSummary(pctx.getContext(), task.getWork().getMapWork(), null); - long inputSize = inputSummary.getLength(); - if (!indexHandler.checkQuerySize(inputSize, pctx.getConf())) { - queryContext.setQueryTasks(null); - return; - } - } catch (IOException e) { - throw new SemanticException("Failed to get task size", e); - } - - // use the IndexHandler to generate the index query - indexHandler.generateIndexQuery(indexes, predicate, pctx, queryContext); - // TODO HIVE-2115 use queryContext.residualPredicate to process residual predicate - - return; - } - - - /** - * Insert the rewrite tasks at the head of the pctx task tree - * @param pctx - * @param context - * @param chosenRewrite - */ - private void insertIndexQuery(ParseContext pctx, IndexWhereProcCtx context, List> chosenRewrite) { - Task wholeTableScan = context.getCurrentTask(); - LinkedHashSet> rewriteLeaves = new LinkedHashSet>(); - findLeaves(chosenRewrite, rewriteLeaves); - - for (Task leaf : rewriteLeaves) { - leaf.addDependentTask(wholeTableScan); // add full scan task as child for every index query task - } - - // replace the original with the index sub-query as a root task - pctx.replaceRootTask(wholeTableScan, chosenRewrite); - } - - /** - * Find the leaves of the task tree - */ - private void findLeaves(List> tasks, Set> leaves) { - for (Task t : tasks) { - if (t.getDependentTasks() == null) { - leaves.add(t); - } else { - findLeaves(t.getDependentTasks(), leaves); - } - } - } - -} - diff --git 
ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java
deleted file mode 100644
index c9dae8faa0..0000000000
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.optimizer.physical.index;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Stack;
-
-import com.google.common.collect.Maps;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.metastore.cache.CacheUtils;
-import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.exec.TableScanOperator;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.index.bitmap.BitmapIndexHandler;
-import org.apache.hadoop.hive.ql.index.compact.CompactIndexHandler;
-import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
-import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
-import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
-import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.lib.Rule;
-import org.apache.hadoop.hive.ql.lib.RuleRegExp;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.optimizer.IndexUtils;
-import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalContext;
-import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.MapredWork;
-import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-import org.apache.hive.common.util.HiveStringUtils;
-
-/**
- *
- * IndexWhereTaskDispatcher. Walks a Task tree, and for the right kind of Task,
- * walks the operator tree to create an index subquery. Then attaches the
- * subquery task to the task tree.
- *
- */
-public class IndexWhereTaskDispatcher implements Dispatcher {
-
-  private final PhysicalContext physicalContext;
-  // To store table to index mapping
-  private final Map> indexMap;
-  private final List supportedIndexes;
-
-  public IndexWhereTaskDispatcher(PhysicalContext context) {
-    super();
-    physicalContext = context;
-    indexMap = Maps.newHashMap();
-    supportedIndexes = new ArrayList();
-    supportedIndexes.add(CompactIndexHandler.class.getName());
-    supportedIndexes.add(BitmapIndexHandler.class.getName());
-  }
-
-  @Override
-  public Object dispatch(Node nd, Stack stack, Object... nodeOutputs)
-      throws SemanticException {
-
-    Task task = (Task) nd;
-
-    ParseContext pctx = physicalContext.getParseContext();
-
-    // create the regex's so the walker can recognize our WHERE queries
-    Map operatorRules = createOperatorRules(pctx);
-
-    // check for no indexes on any table
-    if (operatorRules == null) {
-      return null;
-    }
-
-    // create context so the walker can carry the current task with it.
-    IndexWhereProcCtx indexWhereOptimizeCtx = new IndexWhereProcCtx(task, pctx);
-
-    // create the dispatcher, which fires the processor according to the rule that
-    // best matches
-    Dispatcher dispatcher = new DefaultRuleDispatcher(getDefaultProcessor(),
-        operatorRules,
-        indexWhereOptimizeCtx);
-
-    // walk the mapper operator(not task) tree for each specific task
-    GraphWalker ogw = new DefaultGraphWalker(dispatcher);
-    ArrayList topNodes = new ArrayList();
-    if (task.getWork() instanceof MapredWork) {
-      topNodes.addAll(((MapredWork)task.getWork()).getMapWork().getAliasToWork().values());
-    } else {
-      return null;
-    }
-    ogw.startWalking(topNodes, null);
-
-    return null;
-  }
-
-  private List getIndex(Table table) throws SemanticException {
-    String indexCacheKey = CacheUtils.buildKey(
-        HiveStringUtils.normalizeIdentifier(table.getDbName()),
-        HiveStringUtils.normalizeIdentifier(table.getTableName()));
-    List indexList = indexMap.get(indexCacheKey);
-    if (indexList == null) {
-      indexList = IndexUtils.getIndexes(table, supportedIndexes);
-      if (indexList == null) {
-        indexList = Collections.emptyList();
-      }
-      indexMap.put(indexCacheKey, indexList);
-    }
-    return indexList;
-  }
-
-  /**
-   * Create a set of rules that only matches WHERE predicates on columns we have
-   * an index on.
-   * @return
-   */
-  private Map createOperatorRules(ParseContext pctx) throws SemanticException {
-    Map operatorRules = new LinkedHashMap();
-
-    // query the metastore to know what columns we have indexed
-    Map> indexes = new HashMap>();
-    for (Operator op : pctx.getTopOps().values()) {
-      if (op instanceof TableScanOperator) {
-        List tblIndexes = getIndex(((TableScanOperator) op).getConf().getTableMetadata());
-        if (tblIndexes.size() > 0) {
-          indexes.put((TableScanOperator) op, tblIndexes);
-        }
-      }
-    }
-
-    // quit if our tables don't have any indexes
-    if (indexes.size() == 0) {
-      return null;
-    }
-
-    // We set the pushed predicate from the WHERE clause as the filter expr on
-    // all table scan operators, so we look for table scan operators(TS%)
-    operatorRules.put(new RuleRegExp("RULEWhere", TableScanOperator.getOperatorName() + "%"),
-        new IndexWhereProcessor(indexes));
-
-    return operatorRules;
-  }
-
-
-  private NodeProcessor getDefaultProcessor() {
-    return new NodeProcessor() {
-      @Override
-      public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
-          Object...
nodeOutputs) throws SemanticException { - return null; - } - }; - } - -} diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index b766791ebc..f76e59a90b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -43,7 +43,6 @@ import org.antlr.runtime.tree.Tree; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; @@ -52,9 +51,7 @@ import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; @@ -82,9 +79,6 @@ import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType; -import org.apache.hadoop.hive.ql.index.HiveIndex; -import org.apache.hadoop.hive.ql.index.HiveIndex.IndexType; -import org.apache.hadoop.hive.ql.index.HiveIndexHandler; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.io.RCFileInputFormat; import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat; @@ -105,8 +99,6 @@ import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc.OnePartitionDesc; import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.AlterIndexDesc; -import org.apache.hadoop.hive.ql.plan.AlterIndexDesc.AlterIndexTypes; import org.apache.hadoop.hive.ql.plan.AlterMaterializedViewDesc; import org.apache.hadoop.hive.ql.plan.AlterMaterializedViewDesc.AlterMaterializedViewTypes; import org.apache.hadoop.hive.ql.plan.AlterResourcePlanDesc; @@ -120,7 +112,6 @@ import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc; import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork; import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc; import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc; import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc; @@ -131,7 +122,6 @@ import org.apache.hadoop.hive.ql.plan.DescFunctionDesc; import org.apache.hadoop.hive.ql.plan.DescTableDesc; import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.DropIndexDesc; import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc; @@ -161,7 +151,6 @@ import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; import org.apache.hadoop.hive.ql.plan.ShowGrantDesc; -import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import 
org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; import org.apache.hadoop.hive.ql.plan.ShowResourcePlanDesc; @@ -175,7 +164,6 @@ import org.apache.hadoop.hive.ql.plan.TruncateTableDesc; import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc; import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; -import org.apache.hadoop.hive.ql.session.LineageState; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; import org.apache.hadoop.hive.serde.serdeConstants; @@ -372,12 +360,6 @@ public void analyzeInternal(ASTNode input) throws SemanticException { case HiveParser.TOK_TRUNCATETABLE: analyzeTruncateTable(ast); break; - case HiveParser.TOK_CREATEINDEX: - analyzeCreateIndex(ast); - break; - case HiveParser.TOK_DROPINDEX: - analyzeDropIndex(ast); - break; case HiveParser.TOK_DESCTABLE: ctx.setResFile(ctx.getLocalTmpPath()); analyzeDescribeTable(ast); @@ -484,12 +466,6 @@ public void analyzeInternal(ASTNode input) throws SemanticException { } break; } - case HiveParser.TOK_ALTERINDEX_REBUILD: - analyzeAlterIndexRebuild(ast); - break; - case HiveParser.TOK_ALTERINDEX_PROPERTIES: - analyzeAlterIndexProps(ast); - break; case HiveParser.TOK_SHOWPARTITIONS: ctx.setResFile(ctx.getLocalTmpPath()); analyzeShowPartitions(ast); @@ -502,10 +478,6 @@ public void analyzeInternal(ASTNode input) throws SemanticException { ctx.setResFile(ctx.getLocalTmpPath()); analyzeShowCreateTable(ast); break; - case HiveParser.TOK_SHOWINDEXES: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowIndexes(ast); - break; case HiveParser.TOK_LOCKTABLE: analyzeLockTable(ast); break; @@ -1485,11 +1457,6 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException { try { columnNames = getColumnNames((ASTNode)ast.getChild(1)); - // Throw an error if the table is indexed - List indexes = db.getIndexes(table.getDbName(), tableName, (short)1); - if (indexes != null && indexes.size() > 0) { - throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_INDEXED_TABLE.getMsg()); - } // It would be possible to support this, but this is such a pointless command. 
if (AcidUtils.isInsertOnlyTable(table.getParameters())) { throw new SemanticException("Truncating MM table columns not presently supported"); @@ -1633,235 +1600,6 @@ public static boolean isFullSpec(Table table, Map partSpec) { return true; } - private void analyzeCreateIndex(ASTNode ast) throws SemanticException { - String indexName = unescapeIdentifier(ast.getChild(0).getText()); - String typeName = unescapeSQLString(ast.getChild(1).getText()); - String[] qTabName = getQualifiedTableName((ASTNode) ast.getChild(2)); - List indexedCols = getColumnNames((ASTNode) ast.getChild(3)); - - IndexType indexType = HiveIndex.getIndexType(typeName); - if (indexType != null) { - typeName = indexType.getHandlerClsName(); - } else { - try { - JavaUtils.loadClass(typeName); - } catch (Exception e) { - throw new SemanticException("class name provided for index handler not found.", e); - } - } - - String indexTableName = null; - boolean deferredRebuild = false; - String location = null; - Map tblProps = null; - Map idxProps = null; - String indexComment = null; - - RowFormatParams rowFormatParams = new RowFormatParams(); - StorageFormat storageFormat = new StorageFormat(conf); - - for (int idx = 4; idx < ast.getChildCount(); idx++) { - ASTNode child = (ASTNode) ast.getChild(idx); - if (storageFormat.fillStorageFormat(child)) { - continue; - } - switch (child.getToken().getType()) { - case HiveParser.TOK_TABLEROWFORMAT: - rowFormatParams.analyzeRowFormat(child); - break; - case HiveParser.TOK_CREATEINDEX_INDEXTBLNAME: - ASTNode ch = (ASTNode) child.getChild(0); - indexTableName = getUnescapedName(ch); - break; - case HiveParser.TOK_DEFERRED_REBUILDINDEX: - deferredRebuild = true; - break; - case HiveParser.TOK_TABLELOCATION: - location = unescapeSQLString(child.getChild(0).getText()); - addLocationToOutputs(location); - break; - case HiveParser.TOK_TABLEPROPERTIES: - tblProps = DDLSemanticAnalyzer.getProps((ASTNode) child.getChild(0)); - break; - case HiveParser.TOK_INDEXPROPERTIES: - idxProps = DDLSemanticAnalyzer.getProps((ASTNode) child.getChild(0)); - break; - case HiveParser.TOK_TABLESERIALIZER: - child = (ASTNode) child.getChild(0); - storageFormat.setSerde(unescapeSQLString(child.getChild(0).getText())); - if (child.getChildCount() == 2) { - readProps((ASTNode) (child.getChild(1).getChild(0)), - storageFormat.getSerdeProps()); - } - break; - case HiveParser.TOK_INDEXCOMMENT: - child = (ASTNode) child.getChild(0); - indexComment = unescapeSQLString(child.getText()); - } - } - - storageFormat.fillDefaultStorageFormat(false, false); - if (indexTableName == null) { - indexTableName = MetaStoreUtils.getIndexTableName(qTabName[0], qTabName[1], indexName); - indexTableName = qTabName[0] + "." 
+ indexTableName; // on same database with base table - } else { - indexTableName = getDotName(Utilities.getDbTableName(indexTableName)); - } - inputs.add(new ReadEntity(getTable(qTabName))); - - CreateIndexDesc crtIndexDesc = new CreateIndexDesc(getDotName(qTabName), indexName, - indexedCols, indexTableName, deferredRebuild, storageFormat.getInputFormat(), - storageFormat.getOutputFormat(), - storageFormat.getStorageHandler(), typeName, location, idxProps, tblProps, - storageFormat.getSerde(), storageFormat.getSerdeProps(), rowFormatParams.collItemDelim, - rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, - rowFormatParams.lineDelim, rowFormatParams.mapKeyDelim, indexComment); - Task createIndex = - TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtIndexDesc), conf); - rootTasks.add(createIndex); - } - - private void analyzeDropIndex(ASTNode ast) throws SemanticException { - String indexName = unescapeIdentifier(ast.getChild(0).getText()); - String tableName = getUnescapedName((ASTNode) ast.getChild(1)); - boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null); - // we want to signal an error if the index doesn't exist and we're - // configured not to ignore this - boolean throwException = - !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT); - Table tbl = getTable(tableName, false); - if (throwException && tbl == null) { - throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName)); - } - try { - Index idx = db.getIndex(tableName, indexName); - } catch (HiveException e) { - if (!(e.getCause() instanceof NoSuchObjectException)) { - throw new SemanticException(ErrorMsg.CANNOT_DROP_INDEX.getMsg("dropping index"), e); - } - if (throwException) { - throw new SemanticException(ErrorMsg.INVALID_INDEX.getMsg(indexName)); - } - } - if (tbl != null) { - inputs.add(new ReadEntity(tbl)); - } - - DropIndexDesc dropIdxDesc = new DropIndexDesc(indexName, tableName, throwException); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - dropIdxDesc), conf)); - } - - private void analyzeAlterIndexRebuild(ASTNode ast) throws SemanticException { - String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); - String indexName = unescapeIdentifier(ast.getChild(1).getText()); - HashMap partSpec = null; - Tree part = ast.getChild(2); - if (part != null) { - partSpec = getValidatedPartSpec(getTable(qualified), (ASTNode)part, conf, false); - } - List> indexBuilder = getIndexBuilderMapRed(qualified, indexName, partSpec); - rootTasks.addAll(indexBuilder); - - // Handle updating index timestamps - AlterIndexDesc alterIdxDesc = new AlterIndexDesc(AlterIndexTypes.UPDATETIMESTAMP); - alterIdxDesc.setIndexName(indexName); - alterIdxDesc.setBaseTableName(getDotName(qualified)); - alterIdxDesc.setSpec(partSpec); - - Task tsTask = TaskFactory.get(new DDLWork(alterIdxDesc), conf); - for (Task t : indexBuilder) { - t.addDependentTask(tsTask); - } - } - - private void analyzeAlterIndexProps(ASTNode ast) - throws SemanticException { - - String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); - String indexName = unescapeIdentifier(ast.getChild(1).getText()); - HashMap mapProp = getProps((ASTNode) (ast.getChild(2)) - .getChild(0)); - - AlterIndexDesc alterIdxDesc = new AlterIndexDesc(AlterIndexTypes.ADDPROPS); - alterIdxDesc.setProps(mapProp); - alterIdxDesc.setIndexName(indexName); - alterIdxDesc.setBaseTableName(getDotName(qualified)); - - rootTasks.add(TaskFactory.get(new DDLWork(alterIdxDesc), conf)); - } - - 
private List> getIndexBuilderMapRed(String[] names, String indexName, - HashMap partSpec) throws SemanticException { - try { - Index index = db.getIndex(names[0], names[1], indexName); - Table indexTbl = null; - String indexTableName = index.getIndexTableName(); - if (indexTableName != null) { - indexTbl = getTable(Utilities.getDbTableName(index.getDbName(), indexTableName)); - } - Table baseTbl = getTable(new String[] {index.getDbName(), index.getOrigTableName()}); - - String handlerCls = index.getIndexHandlerClass(); - HiveIndexHandler handler = HiveUtils.getIndexHandler(conf, handlerCls); - - List indexTblPartitions = null; - List baseTblPartitions = null; - if (indexTbl != null) { - indexTblPartitions = new ArrayList(); - baseTblPartitions = preparePartitions(baseTbl, partSpec, - indexTbl, db, indexTblPartitions); - } - - LineageState lineageState = queryState.getLineageState(); - List> ret = handler.generateIndexBuildTaskList(baseTbl, - index, indexTblPartitions, baseTblPartitions, indexTbl, getInputs(), getOutputs(), - lineageState); - return ret; - } catch (Exception e) { - throw new SemanticException(e); - } - } - - private List preparePartitions( - org.apache.hadoop.hive.ql.metadata.Table baseTbl, - HashMap partSpec, - org.apache.hadoop.hive.ql.metadata.Table indexTbl, Hive db, - List indexTblPartitions) - throws HiveException, MetaException { - List baseTblPartitions = new ArrayList(); - if (partSpec != null) { - // if partspec is specified, then only producing index for that - // partition - Partition part = db.getPartition(baseTbl, partSpec, false); - if (part == null) { - throw new HiveException("Partition " - + Warehouse.makePartName(partSpec, false) - + " does not exist in table " - + baseTbl.getTableName()); - } - baseTblPartitions.add(part); - Partition indexPart = db.getPartition(indexTbl, partSpec, false); - if (indexPart == null) { - indexPart = db.createPartition(indexTbl, partSpec); - } - indexTblPartitions.add(indexPart); - } else if (baseTbl.isPartitioned()) { - // if no partition is specified, create indexes for all partitions one - // by one. 
- baseTblPartitions = db.getPartitions(baseTbl); - for (Partition basePart : baseTblPartitions) { - HashMap pSpec = basePart.getSpec(); - Partition indexPart = db.getPartition(indexTbl, pSpec, false); - if (indexPart == null) { - indexPart = db.createPartition(indexTbl, pSpec); - } - indexTblPartitions.add(indexPart); - } - } - return baseTblPartitions; - } - private void validateAlterTableType(Table tbl, AlterTableTypes op) throws SemanticException { validateAlterTableType(tbl, op, false); } @@ -2159,17 +1897,6 @@ private void analyzeAlterTablePartMergeFiles(ASTNode ast, List bucketCols = null; Class inputFormatClass = null; boolean isArchived = false; - boolean checkIndex = HiveConf.getBoolVar(conf, - HiveConf.ConfVars.HIVE_CONCATENATE_CHECK_INDEX); - if (checkIndex) { - List indexes = db.getIndexes(tblObj.getDbName(), tblObj.getTableName(), - Short.MAX_VALUE); - if (indexes != null && indexes.size() > 0) { - throw new SemanticException("can not do merge because source table " - + tableName + " is indexed."); - } - } - if (tblObj.isPartitioned()) { if (partSpec == null) { throw new SemanticException("source table " + tableName @@ -2731,6 +2458,7 @@ private void analyzeShowCreateDatabase(ASTNode ast) throws SemanticException { setFetchTask(createFetchTask(showCreateDbDesc.getSchema())); } + private void analyzeShowCreateTable(ASTNode ast) throws SemanticException { ShowCreateTableDesc showCreateTblDesc; String tableName = getUnescapedName((ASTNode)ast.getChild(0)); @@ -2865,21 +2593,6 @@ private void analyzeShowTableProperties(ASTNode ast) throws SemanticException { setFetchTask(createFetchTask(showTblPropertiesDesc.getSchema())); } - private void analyzeShowIndexes(ASTNode ast) throws SemanticException { - ShowIndexesDesc showIndexesDesc; - String tableName = getUnescapedName((ASTNode) ast.getChild(0)); - showIndexesDesc = new ShowIndexesDesc(tableName, ctx.getResFile()); - - if (ast.getChildCount() == 2) { - int descOptions = ast.getChild(1).getType(); - showIndexesDesc.setFormatted(descOptions == HiveParser.KW_FORMATTED); - } - - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showIndexesDesc), conf)); - setFetchTask(createFetchTask(showIndexesDesc.getSchema())); - } - /** * Add the task according to the parsed command tree. This is used for the CLI * command "SHOW FUNCTIONS;". 
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index 907362312d..3e84fd6a23 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -151,10 +151,6 @@ TOK_COLTYPELIST; TOK_CREATEDATABASE; TOK_CREATETABLE; TOK_TRUNCATETABLE; -TOK_CREATEINDEX; -TOK_CREATEINDEX_INDEXTBLNAME; -TOK_DEFERRED_REBUILDINDEX; -TOK_DROPINDEX; TOK_LIKETABLE; TOK_DESCTABLE; TOK_DESCFUNCTION; @@ -189,8 +185,6 @@ TOK_ALTERTABLE_CLUSTER_SORT; TOK_ALTERTABLE_COMPACT; TOK_ALTERTABLE_DROPCONSTRAINT; TOK_ALTERTABLE_ADDCONSTRAINT; -TOK_ALTERINDEX_REBUILD; -TOK_ALTERINDEX_PROPERTIES; TOK_MSCK; TOK_SHOWDATABASES; TOK_SHOWTABLES; @@ -271,8 +265,6 @@ TOK_EXPLAIN_SQ_REWRITE; TOK_TABLESERIALIZER; TOK_TABLEPROPERTIES; TOK_TABLEPROPLIST; -TOK_INDEXPROPERTIES; -TOK_INDEXPROPLIST; TOK_TABTYPE; TOK_LIMIT; TOK_OFFSET; @@ -310,7 +302,6 @@ TOK_PRIV_ALTER_METADATA; TOK_PRIV_ALTER_DATA; TOK_PRIV_DELETE; TOK_PRIV_DROP; -TOK_PRIV_INDEX; TOK_PRIV_INSERT; TOK_PRIV_LOCK; TOK_PRIV_SELECT; @@ -324,9 +315,7 @@ TOK_SHOW_ROLE_GRANT; TOK_SHOW_ROLES; TOK_SHOW_SET_ROLE; TOK_SHOW_ROLE_PRINCIPALS; -TOK_SHOWINDEXES; TOK_SHOWDBLOCKS; -TOK_INDEXCOMMENT; TOK_DESCDATABASE; TOK_DATABASEPROPERTIES; TOK_DATABASELOCATION; @@ -929,8 +918,6 @@ ddlStatement | dropMaterializedViewStatement | createFunctionStatement | createMacroStatement - | createIndexStatement - | dropIndexStatement | dropFunctionStatement | reloadFunctionStatement | dropMacroStatement @@ -1102,80 +1089,6 @@ truncateTableStatement @after { popMsg(state); } : KW_TRUNCATE KW_TABLE tablePartitionPrefix (KW_COLUMNS LPAREN columnNameList RPAREN)? -> ^(TOK_TRUNCATETABLE tablePartitionPrefix columnNameList?); -createIndexStatement -@init { pushMsg("create index statement", state);} -@after {popMsg(state);} - : KW_CREATE KW_INDEX indexName=identifier - KW_ON KW_TABLE tab=tableName LPAREN indexedCols=columnNameList RPAREN - KW_AS typeName=StringLiteral - autoRebuild? - indexPropertiesPrefixed? - indexTblName? - tableRowFormat? - tableFileFormat? - tableLocation? - tablePropertiesPrefixed? - indexComment? - ->^(TOK_CREATEINDEX $indexName $typeName $tab $indexedCols - autoRebuild? - indexPropertiesPrefixed? - indexTblName? - tableRowFormat? - tableFileFormat? - tableLocation? - tablePropertiesPrefixed? - indexComment?) - ; - -indexComment -@init { pushMsg("comment on an index", state);} -@after {popMsg(state);} - : - KW_COMMENT comment=StringLiteral -> ^(TOK_INDEXCOMMENT $comment) - ; - -autoRebuild -@init { pushMsg("auto rebuild index", state);} -@after {popMsg(state);} - : KW_WITH KW_DEFERRED KW_REBUILD - ->^(TOK_DEFERRED_REBUILDINDEX) - ; - -indexTblName -@init { pushMsg("index table name", state);} -@after {popMsg(state);} - : KW_IN KW_TABLE indexTbl=tableName - ->^(TOK_CREATEINDEX_INDEXTBLNAME $indexTbl) - ; - -indexPropertiesPrefixed -@init { pushMsg("table properties with prefix", state); } -@after { popMsg(state); } - : - KW_IDXPROPERTIES! 
indexProperties - ; - -indexProperties -@init { pushMsg("index properties", state); } -@after { popMsg(state); } - : - LPAREN indexPropertiesList RPAREN -> ^(TOK_INDEXPROPERTIES indexPropertiesList) - ; - -indexPropertiesList -@init { pushMsg("index properties list", state); } -@after { popMsg(state); } - : - keyValueProperty (COMMA keyValueProperty)* -> ^(TOK_INDEXPROPLIST keyValueProperty+) - ; - -dropIndexStatement -@init { pushMsg("drop index statement", state);} -@after {popMsg(state);} - : KW_DROP KW_INDEX ifExists? indexName=identifier KW_ON tab=tableName - ->^(TOK_DROPINDEX $indexName $tab ifExists?) - ; - dropTableStatement @init { pushMsg("drop statement", state); } @after { popMsg(state); } @@ -1190,7 +1103,6 @@ alterStatement | KW_ALTER KW_VIEW tableName KW_AS? alterViewStatementSuffix -> ^(TOK_ALTERVIEW tableName alterViewStatementSuffix) | KW_ALTER KW_MATERIALIZED KW_VIEW tableName alterMaterializedViewStatementSuffix -> ^(TOK_ALTER_MATERIALIZED_VIEW tableName alterMaterializedViewStatementSuffix) - | KW_ALTER KW_INDEX alterIndexStatementSuffix -> alterIndexStatementSuffix | KW_ALTER (KW_DATABASE|KW_SCHEMA) alterDatabaseStatementSuffix -> alterDatabaseStatementSuffix ; @@ -1254,20 +1166,6 @@ alterMaterializedViewStatementSuffix | alterMaterializedViewSuffixRebuild ; -alterIndexStatementSuffix -@init { pushMsg("alter index statement", state); } -@after { popMsg(state); } - : indexName=identifier KW_ON tableName partitionSpec? - ( - KW_REBUILD - ->^(TOK_ALTERINDEX_REBUILD tableName $indexName partitionSpec?) - | - KW_SET KW_IDXPROPERTIES - indexProperties - ->^(TOK_ALTERINDEX_PROPERTIES tableName $indexName indexProperties) - ) - ; - alterDatabaseStatementSuffix @init { pushMsg("alter database statement", state); } @after { popMsg(state); } @@ -1652,8 +1550,6 @@ showStatement | (parttype=partTypeExpr)? (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWLOCKS $parttype? $isExtended?) ) - | KW_SHOW (showOptions=KW_FORMATTED)? (KW_INDEX|KW_INDEXES) KW_ON showStmtIdentifier ((KW_FROM|KW_IN) db_name=identifier)? - -> ^(TOK_SHOWINDEXES showStmtIdentifier $showOptions? $db_name?) | KW_SHOW KW_COMPACTIONS -> ^(TOK_SHOW_COMPACTIONS) | KW_SHOW KW_TRANSACTIONS -> ^(TOK_SHOW_TRANSACTIONS) | KW_SHOW KW_CONF StringLiteral -> ^(TOK_SHOWCONF StringLiteral) @@ -1839,7 +1735,6 @@ privilegeType | KW_UPDATE -> ^(TOK_PRIV_ALTER_DATA) | KW_CREATE -> ^(TOK_PRIV_CREATE) | KW_DROP -> ^(TOK_PRIV_DROP) - | KW_INDEX -> ^(TOK_PRIV_INDEX) | KW_LOCK -> ^(TOK_PRIV_LOCK) | KW_SELECT -> ^(TOK_PRIV_SELECT) | KW_SHOW_DATABASE -> ^(TOK_PRIV_SHOW_DATABASE) diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java deleted file mode 100644 index 22b66979dd..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.parse; - - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.ql.Driver; -import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.hooks.ReadEntity; -import org.apache.hadoop.hive.ql.metadata.Hive; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.Partition; -import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.optimizer.IndexUtils; -import org.apache.hadoop.hive.ql.plan.LoadTableDesc; -import org.apache.hadoop.hive.ql.plan.TableDesc; -import org.apache.hadoop.hive.ql.session.LineageState; - -import java.io.Serializable; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; - -public class IndexUpdater { - private List loadTableWork; - private HiveConf conf; - // Assumes one instance of this + single-threaded compilation for each query. - private Hive hive; - private List> tasks; - private Set inputs; - private LineageState lineageState; - - public IndexUpdater(List loadTableWork, Set inputs, Configuration conf, - LineageState lineageState) { - this.loadTableWork = loadTableWork; - this.inputs = inputs; - this.conf = new HiveConf(conf, IndexUpdater.class); - this.lineageState = lineageState; - this.tasks = new LinkedList>(); - } - - public IndexUpdater(LoadTableDesc loadTableWork, Set inputs, - Configuration conf) { - this.loadTableWork = new LinkedList(); - this.loadTableWork.add(loadTableWork); - this.conf = new HiveConf(conf, IndexUpdater.class); - this.tasks = new LinkedList>(); - this.inputs = inputs; - } - - public List> generateUpdateTasks() throws - HiveException { - hive = Hive.get(this.conf); - for (LoadTableDesc ltd : loadTableWork) { - TableDesc td = ltd.getTable(); - Table srcTable = hive.getTable(td.getTableName()); - List tblIndexes = IndexUtils.getAllIndexes(srcTable, (short)-1); - Map partSpec = ltd.getPartitionSpec(); - if (partSpec == null || partSpec.size() == 0) { - //unpartitioned table, update whole index - doIndexUpdate(tblIndexes); - } else { - doIndexUpdate(tblIndexes, partSpec); - } - } - return tasks; - } - - private void doIndexUpdate(List tblIndexes) throws HiveException { - for (Index idx : tblIndexes) { - StringBuilder sb = new StringBuilder(); - sb.append("ALTER INDEX "); - sb.append(idx.getIndexName()); - sb.append(" ON "); - sb.append(idx.getDbName()).append('.'); - sb.append(idx.getOrigTableName()); - sb.append(" REBUILD"); - compileRebuild(sb.toString()); - } - } - - private void doIndexUpdate(List tblIndexes, Map - partSpec) throws HiveException { - for (Index index : tblIndexes) { - if (containsPartition(index, partSpec)) { - doIndexUpdate(index, partSpec); - } - } - } - - private void doIndexUpdate(Index index, Map partSpec) { - StringBuilder ps = new StringBuilder(); - boolean first = true; - ps.append("("); - for (String key : partSpec.keySet()) { - if (!first) { - ps.append(", 
"); - } else { - first = false; - } - ps.append(key); - ps.append("="); - ps.append(partSpec.get(key)); - } - ps.append(")"); - StringBuilder sb = new StringBuilder(); - sb.append("ALTER INDEX "); - sb.append(index.getIndexName()); - sb.append(" ON "); - sb.append(index.getDbName()).append('.'); - sb.append(index.getOrigTableName()); - sb.append(" PARTITION "); - sb.append(ps.toString()); - sb.append(" REBUILD"); - compileRebuild(sb.toString()); - } - - private void compileRebuild(String query) { - Driver driver = new Driver(this.conf, lineageState); - driver.compile(query, false); - tasks.addAll(driver.getPlan().getRootTasks()); - inputs.addAll(driver.getPlan().getInputs()); - } - - - private boolean containsPartition(Index index, - Map partSpec) throws HiveException { - String[] qualified = Utilities.getDbTableName(index.getDbName(), index.getIndexTableName()); - Table indexTable = hive.getTable(qualified[0], qualified[1]); - List parts = hive.getPartitions(indexTable, partSpec); - return (parts == null || parts.size() == 0); - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java index 361976311d..cc66936065 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java @@ -360,25 +360,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { statTask = TaskFactory.get(columnStatsWork, conf); } - // HIVE-3334 has been filed for load file with index auto update - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEINDEXAUTOUPDATE)) { - IndexUpdater indexUpdater = new IndexUpdater(loadTableWork, getInputs(), conf); - try { - List> indexUpdateTasks = indexUpdater.generateUpdateTasks(); - - for (Task updateTask : indexUpdateTasks) { - //LOAD DATA will either have a copy & move or just a move, - // we always want the update to be dependent on the move - childTask.addDependentTask(updateTask); - if (statTask != null) { - updateTask.addDependentTask(statTask); - } - } - } catch (HiveException e) { - console.printInfo("WARNING: could not auto-update stale indexes, indexes are not out of sync"); - } - } - else if (statTask != null) { + if (statTask != null) { childTask.addDependentTask(statTask); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java index 2e1f50e641..34963ff0c9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java @@ -74,7 +74,6 @@ commandType.put(HiveParser.TOK_SHOW_CREATEDATABASE, HiveOperation.SHOW_CREATEDATABASE); commandType.put(HiveParser.TOK_SHOW_CREATETABLE, HiveOperation.SHOW_CREATETABLE); commandType.put(HiveParser.TOK_SHOWFUNCTIONS, HiveOperation.SHOWFUNCTIONS); - commandType.put(HiveParser.TOK_SHOWINDEXES, HiveOperation.SHOWINDEXES); commandType.put(HiveParser.TOK_SHOWPARTITIONS, HiveOperation.SHOWPARTITIONS); commandType.put(HiveParser.TOK_SHOWLOCKS, HiveOperation.SHOWLOCKS); commandType.put(HiveParser.TOK_SHOWDBLOCKS, HiveOperation.SHOWLOCKS); @@ -90,10 +89,6 @@ commandType.put(HiveParser.TOK_CREATE_MATERIALIZED_VIEW, HiveOperation.CREATE_MATERIALIZED_VIEW); commandType.put(HiveParser.TOK_DROPVIEW, HiveOperation.DROPVIEW); commandType.put(HiveParser.TOK_DROP_MATERIALIZED_VIEW, HiveOperation.DROP_MATERIALIZED_VIEW); - 
commandType.put(HiveParser.TOK_CREATEINDEX, HiveOperation.CREATEINDEX); - commandType.put(HiveParser.TOK_DROPINDEX, HiveOperation.DROPINDEX); - commandType.put(HiveParser.TOK_ALTERINDEX_REBUILD, HiveOperation.ALTERINDEX_REBUILD); - commandType.put(HiveParser.TOK_ALTERINDEX_PROPERTIES, HiveOperation.ALTERINDEX_PROPS); commandType.put(HiveParser.TOK_ALTERVIEW_PROPERTIES, HiveOperation.ALTERVIEW_PROPERTIES); commandType.put(HiveParser.TOK_ALTERVIEW_DROPPROPERTIES, HiveOperation.ALTERVIEW_PROPERTIES); commandType.put(HiveParser.TOK_ALTERVIEW_ADDPARTS, HiveOperation.ALTERTABLE_ADDPARTS); @@ -299,8 +294,6 @@ private static BaseSemanticAnalyzer getInternal(QueryState queryState, ASTNode t case HiveParser.TOK_DESCTABLE: case HiveParser.TOK_DESCFUNCTION: case HiveParser.TOK_MSCK: - case HiveParser.TOK_ALTERINDEX_REBUILD: - case HiveParser.TOK_ALTERINDEX_PROPERTIES: case HiveParser.TOK_SHOWDATABASES: case HiveParser.TOK_SHOWTABLES: case HiveParser.TOK_SHOWCOLUMNS: @@ -310,7 +303,6 @@ private static BaseSemanticAnalyzer getInternal(QueryState queryState, ASTNode t case HiveParser.TOK_SHOW_CREATETABLE: case HiveParser.TOK_SHOWFUNCTIONS: case HiveParser.TOK_SHOWPARTITIONS: - case HiveParser.TOK_SHOWINDEXES: case HiveParser.TOK_SHOWLOCKS: case HiveParser.TOK_SHOWDBLOCKS: case HiveParser.TOK_SHOW_COMPACTIONS: @@ -319,8 +311,6 @@ private static BaseSemanticAnalyzer getInternal(QueryState queryState, ASTNode t case HiveParser.TOK_SHOWCONF: case HiveParser.TOK_SHOWVIEWS: case HiveParser.TOK_SHOWMATERIALIZEDVIEWS: - case HiveParser.TOK_CREATEINDEX: - case HiveParser.TOK_DROPINDEX: case HiveParser.TOK_ALTERTABLE_CLUSTER_SORT: case HiveParser.TOK_LOCKTABLE: case HiveParser.TOK_UNLOCKTABLE: diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index 92d29e3a57..3122db8267 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -220,21 +220,6 @@ public void compile(final ParseContext pCtx, .get(new MoveWork(null, null, ltd, null, false), conf); mvTask.add(tsk); - // Check to see if we are stale'ing any indexes and auto-update them if we want - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEINDEXAUTOUPDATE)) { - IndexUpdater indexUpdater = new IndexUpdater(loadTableWork, inputs, conf, - queryState.getLineageState()); - try { - List> indexUpdateTasks = indexUpdater - .generateUpdateTasks(); - for (Task updateTask : indexUpdateTasks) { - tsk.addDependentTask(updateTask); - } - } catch (HiveException e) { - console - .printInfo("WARNING: could not auto-update stale indexes, which are not in sync"); - } - } } boolean oneLoadFileForCtas = true; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java deleted file mode 100644 index a335495c12..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.Map; - -import org.apache.hadoop.hive.ql.plan.Explain.Level; - -/** - * AlterIndexDesc. - * - */ -@Explain(displayName = "Alter Index", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class AlterIndexDesc extends DDLDesc implements Serializable { - private static final long serialVersionUID = 1L; - private String indexName; - private String baseTable; - private Map partSpec; // partition specification of partitions touched - private Map props; - - /** - * alterIndexTypes. - * - */ - public static enum AlterIndexTypes { - UPDATETIMESTAMP, - ADDPROPS}; - - AlterIndexTypes op; - - public AlterIndexDesc() { - } - - public AlterIndexDesc(AlterIndexTypes type) { - this.op = type; - } - - /** - * @return the name of the index - */ - @Explain(displayName = "name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getIndexName() { - return indexName; - } - - /** - * @param indexName - * the indexName to set - */ - public void setIndexName(String indexName) { - this.indexName = indexName; - } - - /** - * @return the baseTable - */ - @Explain(displayName = "new name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getBaseTableName() { - return baseTable; - } - - /** - * @param baseTable - * the baseTable to set - */ - public void setBaseTableName(String baseTable) { - this.baseTable = baseTable; - } - - /** - * @return the partition spec - */ - public Map getSpec() { - return partSpec; - } - - /** - * @param partSpec - * the partition spec to set - */ - public void setSpec(Map partSpec) { - this.partSpec = partSpec; - } - - /** - * @return the op - */ - public AlterIndexTypes getOp() { - return op; - } - - /** - * @param op - * the op to set - */ - public void setOp(AlterIndexTypes op) { - this.op = op; - } - - /** - * @return the props - */ - @Explain(displayName = "properties") - public Map getProps() { - return props; - } - - /** - * @param props - * the props to set - */ - public void setProps(Map props) { - this.props = props; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateIndexDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateIndexDesc.java deleted file mode 100644 index c003ee5a9f..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateIndexDesc.java +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.List; -import java.util.Map; - -/** - * create index descriptor - */ -public class CreateIndexDesc extends DDLDesc implements Serializable { - - private static final long serialVersionUID = 1L; - String tableName; - String indexName; - List indexedCols; - String indexTableName; - boolean deferredRebuild; - String inputFormat; - String outputFormat; - String serde; - String storageHandler; - String indexTypeHandlerClass; - String location; - Map idxProps; - Map tblProps; - Map serdeProps; - String collItemDelim; - String fieldDelim; - String fieldEscape; - String lineDelim; - String mapKeyDelim; - - String indexComment; - - public CreateIndexDesc() { - super(); - } - - public CreateIndexDesc(String tableName, String indexName, - List indexedCols, String indexTableName, boolean deferredRebuild, - String inputFormat, String outputFormat, String storageHandler, - String typeName, String location, Map idxProps, Map tblProps, - String serde, Map serdeProps, String collItemDelim, - String fieldDelim, String fieldEscape, String lineDelim, - String mapKeyDelim, String indexComment) { - super(); - this.tableName = tableName; - this.indexName = indexName; - this.indexedCols = indexedCols; - this.indexTableName = indexTableName; - this.deferredRebuild = deferredRebuild; - this.inputFormat = inputFormat; - this.outputFormat = outputFormat; - this.serde = serde; - this.storageHandler = storageHandler; - this.indexTypeHandlerClass = typeName; - this.location = location; - this.idxProps = idxProps; - this.tblProps = tblProps; - this.serde = serde; - this.serdeProps = serdeProps; - this.collItemDelim = collItemDelim; - this.fieldDelim = fieldDelim; - this.fieldEscape = fieldEscape; - this.lineDelim = lineDelim; - this.mapKeyDelim = mapKeyDelim; - this.indexComment = indexComment; - } - - public String getTableName() { - return tableName; - } - - public void setTableName(String tableName) { - this.tableName = tableName; - } - - public String getIndexName() { - return indexName; - } - - public void setIndexName(String indexName) { - this.indexName = indexName; - } - - public List getIndexedCols() { - return indexedCols; - } - - public void setIndexedCols(List indexedCols) { - this.indexedCols = indexedCols; - } - - public String getIndexTableName() { - return indexTableName; - } - - public void setIndexTableName(String indexTableName) { - this.indexTableName = indexTableName; - } - - public boolean isDeferredRebuild() { - return deferredRebuild; - } - - public boolean getDeferredRebuild() { - return deferredRebuild; - } - - public void setDeferredRebuild(boolean deferredRebuild) { - this.deferredRebuild = deferredRebuild; - } - - public String getInputFormat() { - return inputFormat; - } - - public void setInputFormat(String inputFormat) { - this.inputFormat = inputFormat; - } - - public String getOutputFormat() { - return outputFormat; - } - - public void setOutputFormat(String outputFormat) { - this.outputFormat = outputFormat; - } - - public String getSerde() { - return serde; - } - - public void 
setSerde(String serde) { - this.serde = serde; - } - - public String getStorageHandler() { - return storageHandler; - } - - public void setStorageHandler(String storageHandler) { - this.storageHandler = storageHandler; - } - - public String getLocation() { - return location; - } - - public void setLocation(String location) { - this.location = location; - } - - public Map getIdxProps() { - return idxProps; - } - - public void setIdxProps(Map idxProps) { - this.idxProps = idxProps; - } - - public Map getTblProps() { - return tblProps; - } - - public void setTblProps(Map tblProps) { - this.tblProps = tblProps; - } - - public Map getSerdeProps() { - return serdeProps; - } - - public void setSerdeProps(Map serdeProps) { - this.serdeProps = serdeProps; - } - - public String getCollItemDelim() { - return collItemDelim; - } - - public void setCollItemDelim(String collItemDelim) { - this.collItemDelim = collItemDelim; - } - - public String getFieldDelim() { - return fieldDelim; - } - - public void setFieldDelim(String fieldDelim) { - this.fieldDelim = fieldDelim; - } - - public String getFieldEscape() { - return fieldEscape; - } - - public void setFieldEscape(String fieldEscape) { - this.fieldEscape = fieldEscape; - } - - public String getLineDelim() { - return lineDelim; - } - - public void setLineDelim(String lineDelim) { - this.lineDelim = lineDelim; - } - - public String getMapKeyDelim() { - return mapKeyDelim; - } - - public void setMapKeyDelim(String mapKeyDelim) { - this.mapKeyDelim = mapKeyDelim; - } - - public String getIndexTypeHandlerClass() { - return indexTypeHandlerClass; - } - - public void setIndexTypeHandlerClass(String indexTypeHandlerClass) { - this.indexTypeHandlerClass = indexTypeHandlerClass; - } - - public String getIndexComment() { - return indexComment; - } - - public void setIndexComment(String indexComment) { - this.indexComment = indexComment; - } - -} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java index 52c105fadb..98da309094 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java @@ -36,10 +36,7 @@ // TODO: this can probably be replaced with much less code via dynamic dispatch and/or templates. 
private PreInsertTableDesc preInsertTableDesc; private InsertTableDesc insertTableDesc; - private CreateIndexDesc createIndexDesc; - private AlterIndexDesc alterIndexDesc; private AlterMaterializedViewDesc alterMVDesc; - private DropIndexDesc dropIdxDesc; private CreateDatabaseDesc createDatabaseDesc; private SwitchDatabaseDesc switchDatabaseDesc; private DropDatabaseDesc dropDatabaseDesc; @@ -71,7 +68,6 @@ private AlterTableSimpleDesc alterTblSimpleDesc; private MsckDesc msckDesc; private ShowTableStatusDesc showTblStatusDesc; - private ShowIndexesDesc showIndexesDesc; private DescDatabaseDesc descDbDesc; private AlterDatabaseDesc alterDbDesc; private AlterTableAlterPartDesc alterTableAlterPartDesc; @@ -125,16 +121,6 @@ public DDLWork(HashSet inputs, HashSet outputs) { this.outputs = outputs; } - public DDLWork(HashSet inputs, HashSet outputs, - CreateIndexDesc createIndex) { - this(inputs, outputs); - this.createIndexDesc = createIndex; - } - - public DDLWork(AlterIndexDesc alterIndex) { - this.alterIndexDesc = alterIndex; - } - /** * @param createDatabaseDesc * Create Database descriptor @@ -209,16 +195,6 @@ public DDLWork(HashSet inputs, HashSet outputs, } /** - * @param alterIdxDesc - * alter index descriptor - */ - public DDLWork(HashSet inputs, HashSet outputs, - AlterIndexDesc alterIndexDesc) { - this(inputs, outputs); - this.alterIndexDesc = alterIndexDesc; - } - - /** * @param alterMVDesc * alter materialized view descriptor */ @@ -492,12 +468,6 @@ public DDLWork(HashSet inputs, HashSet outputs, } public DDLWork(HashSet inputs, HashSet outputs, - DropIndexDesc dropIndexDesc) { - this(inputs, outputs); - this.dropIdxDesc = dropIndexDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, RoleDDLDesc roleDDLDesc) { this(inputs, outputs); this.roleDDLDesc = roleDDLDesc; @@ -528,12 +498,6 @@ public DDLWork(HashSet inputs, HashSet outputs, } public DDLWork(HashSet inputs, HashSet outputs, - ShowIndexesDesc showIndexesDesc) { - this(inputs, outputs); - this.showIndexesDesc = showIndexesDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, AlterTablePartMergeFilesDesc mergeDesc) { this(inputs, outputs); this.mergeFilesDesc = mergeDesc; @@ -725,36 +689,6 @@ public void setCreateTblDesc(CreateTableDesc createTblDesc) { } /** - * @return the createIndexDesc - */ - public CreateIndexDesc getCreateIndexDesc() { - return createIndexDesc; - } - - /** - * @param createIndexDesc - * the createIndexDesc to set - */ - public void setCreateIndexDesc(CreateIndexDesc createIndexDesc) { - this.createIndexDesc = createIndexDesc; - } - - /** - * @return the alterIndexDesc - */ - public AlterIndexDesc getAlterIndexDesc() { - return alterIndexDesc; - } - - /** - * @param alterIndexDesc - * the alterIndexDesc to set - */ - public void setAlterIndexDesc(AlterIndexDesc alterIndexDesc) { - this.alterIndexDesc = alterIndexDesc; - } - - /** * @return the createTblDesc */ @Explain(displayName = "Create Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) @@ -1041,18 +975,6 @@ public void setShowCreateTblDesc(ShowCreateTableDesc showCreateTblDesc) { } /** - * @return the showIndexesDesc - */ - @Explain(displayName = "Show Index Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public ShowIndexesDesc getShowIndexesDesc() { - return showIndexesDesc; - } - - public void setShowIndexesDesc(ShowIndexesDesc showIndexesDesc) { - this.showIndexesDesc = showIndexesDesc; - } - - /** * @return the descTblDesc */ @Explain(displayName = "Describe Table 
Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) @@ -1172,14 +1094,6 @@ public void setOutputs(HashSet outputs) { this.outputs = outputs; } - public DropIndexDesc getDropIdxDesc() { - return dropIdxDesc; - } - - public void setDropIdxDesc(DropIndexDesc dropIdxDesc) { - this.dropIdxDesc = dropIdxDesc; - } - /** * @return role ddl desc */ diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropIndexDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/DropIndexDesc.java deleted file mode 100644 index 58ac328bdf..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DropIndexDesc.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.ql.plan; - -public class DropIndexDesc { - - private static final long serialVersionUID = 1L; - - private String indexName; - - private String tableName; - - private boolean throwException; - - /** - * @param indexName - * @param tableName - */ - public DropIndexDesc(String indexName, String tableName, boolean throwException) { - this.indexName = indexName; - this.tableName = tableName; - this.throwException = throwException; - } - - /** - * @return index name - */ - public String getIndexName() { - return indexName; - } - - /** - * @param indexName index name - */ - public void setIndexName(String indexName) { - this.indexName = indexName; - } - - /** - * @return table name - */ - public String getTableName() { - return tableName; - } - - /** - * @param tableName table name - */ - public void setTableName(String tableName) { - this.tableName = tableName; - } - - public boolean isThrowException() { - return throwException; - } - - public void setThrowException(boolean throwException) { - this.throwException = throwException; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java index 3938bd5ae8..a9e5c8cffe 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java @@ -71,7 +71,6 @@ SHOW_CREATEDATABASE("SHOW_CREATEDATABASE", new Privilege[]{Privilege.SELECT}, null), SHOW_CREATETABLE("SHOW_CREATETABLE", new Privilege[]{Privilege.SELECT}, null), SHOWFUNCTIONS("SHOWFUNCTIONS", null, null, true, false), - SHOWINDEXES("SHOWINDEXES", null, null, true, false), SHOWPARTITIONS("SHOWPARTITIONS", null, null), SHOWLOCKS("SHOWLOCKS", null, null, true, false), SHOWCONF("SHOWCONF", null, null), @@ -89,9 +88,6 @@ DROP_MATERIALIZED_VIEW("DROP_MATERIALIZED_VIEW", null, new Privilege[]{Privilege.DROP}), ALTER_MATERIALIZED_VIEW_REWRITE("ALTER_MATERIALIZED_VIEW_REWRITE", new Privilege[]{Privilege.ALTER_METADATA}, null), - CREATEINDEX("CREATEINDEX", null, null), - 
DROPINDEX("DROPINDEX", null, null), - ALTERINDEX_REBUILD("ALTERINDEX_REBUILD", null, null), ALTERVIEW_PROPERTIES("ALTERVIEW_PROPERTIES", null, null), DROPVIEW_PROPERTIES("DROPVIEW_PROPERTIES", null, null), LOCKTABLE("LOCKTABLE", new Privilege[]{Privilege.LOCK}, null), @@ -114,7 +110,6 @@ TRUNCATETABLE("TRUNCATETABLE", null, new Privilege[]{Privilege.DROP}), CREATETABLE_AS_SELECT("CREATETABLE_AS_SELECT", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.CREATE}), QUERY("QUERY", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.ALTER_DATA, Privilege.CREATE}, true, false), - ALTERINDEX_PROPS("ALTERINDEX_PROPS",null, null), ALTERDATABASE("ALTERDATABASE", null, null), ALTERDATABASE_OWNER("ALTERDATABASE_OWNER", null, null), ALTERDATABASE_LOCATION("ALTERDATABASE_LOCATION", new Privilege[]{Privilege.ALTER_DATA}, null), diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java index fa7a8a3b84..044f9b2df4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java @@ -99,8 +99,6 @@ private String inputformat; - private String indexIntermediateFile; - private Integer numMapTasks; private Long maxSplitSize; private Long minSplitSize; @@ -564,10 +562,6 @@ public boolean isMapperCannotSpanPartns() { return this.mapperCannotSpanPartns; } - public String getIndexIntermediateFile() { - return indexIntermediateFile; - } - public ArrayList getAliases() { return new ArrayList(aliasToWork.keySet()); } @@ -618,14 +612,6 @@ public void mergingInto(MapWork mapWork) { return sortedColsByDirectory; } - public void addIndexIntermediateFile(String fileName) { - if (this.indexIntermediateFile == null) { - this.indexIntermediateFile = fileName; - } else { - this.indexIntermediateFile += "," + fileName; - } - } - public int getSamplingType() { return samplingType; } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowIndexesDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowIndexesDesc.java deleted file mode 100644 index e18a94c493..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowIndexesDesc.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.ql.plan.Explain.Level; - - -/** - * ShowIndexesDesc. - * Returns table index information per SQL syntax. 
- */ -@Explain(displayName = "Show Indexes", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class ShowIndexesDesc extends DDLDesc implements Serializable { - private static final long serialVersionUID = 1L; - String tableName; - String resFile; - boolean isFormatted; - - /** - * thrift ddl for the result of show indexes. - */ - private static final String schema = "idx_name,tab_name,col_names,idx_tab_name,idx_type,comment" - + "#string:string:string:string:string:string"; - - public static String getSchema() { - return schema; - } - - public String getTableName() { - return tableName; - } - - public String getResFile() { - return resFile; - } - - public boolean isFormatted() { - return isFormatted; - } - - public void setFormatted(boolean isFormatted) { - this.isFormatted = isFormatted; - } - - /** - * - * @param tableName - * Name of the table whose indexes need to be listed. - * @param resFile - * File to store the results in. - */ - public ShowIndexesDesc(String tableName, Path resFile) { - this.tableName = tableName; - this.resFile = resFile.toString(); - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java index 2accad3a7d..320711912e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java @@ -653,6 +653,7 @@ protected Object handlePredicates(Node nd, ExprWalkerInfo prunePreds, OpWalkerIn } public static class ReduceSinkPPD extends DefaultPPD implements NodeProcessor { + @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException { super.process(nd, stack, procCtx, nodeOutputs); @@ -790,7 +791,9 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, * @param ewi */ protected void logExpr(Node nd, ExprWalkerInfo ewi) { - if (!LOG.isDebugEnabled()) return; + if (!LOG.isDebugEnabled()) { + return; + } for (Entry> e : ewi.getFinalCandidates().entrySet()) { StringBuilder sb = new StringBuilder("Pushdown predicates of ").append(nd.getName()) .append(" for alias ").append(e.getKey()).append(": "); @@ -976,12 +979,7 @@ private static ExprNodeGenericFuncDesc pushFilterToStorageHandler( TableScanDesc tableScanDesc = tableScanOp.getConf(); Table tbl = tableScanDesc.getTableMetadata(); - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTINDEXFILTER)) { - // attach the original predicate to the table scan operator for index - // optimizations that require the pushed predicate before pcr & later - // optimizations are applied - tableScanDesc.setFilterExpr(originalPredicate); - } + tableScanDesc.setFilterExpr(originalPredicate); if (!tbl.isNonNative()) { return originalPredicate; } diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/Privilege.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/Privilege.java index 6000590b70..1e9c639460 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/Privilege.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/Privilege.java @@ -101,9 +101,6 @@ public Privilege() { public static Privilege DROP = new Privilege(PrivilegeType.DROP, PrivilegeScope.ALLSCOPE_EXCEPT_COLUMN); - public static Privilege INDEX = new Privilege(PrivilegeType.INDEX, - PrivilegeScope.ALLSCOPE); - public static Privilege LOCK = new Privilege(PrivilegeType.LOCK, PrivilegeScope.ALLSCOPE_EXCEPT_COLUMN); diff --git 
ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeRegistry.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeRegistry.java index 30409389d4..27c7986f3c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeRegistry.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeRegistry.java @@ -48,7 +48,6 @@ public static Privilege getPrivilege(PrivilegeType privilegeType) { Registry.put(Privilege.ALTER_METADATA.getPriv(), Privilege.ALTER_METADATA); Registry.put(Privilege.CREATE.getPriv(), Privilege.CREATE); Registry.put(Privilege.DROP.getPriv(), Privilege.DROP); - Registry.put(Privilege.INDEX.getPriv(), Privilege.INDEX); Registry.put(Privilege.LOCK.getPriv(), Privilege.LOCK); Registry.put(Privilege.SELECT.getPriv(), Privilege.SELECT); Registry.put(Privilege.SHOW_DATABASE.getPriv(), diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java index 56b6bf6476..7678e8f1f8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java @@ -33,7 +33,6 @@ ALTER_METADATA(HiveParser.TOK_PRIV_ALTER_METADATA, "Alter"), CREATE(HiveParser.TOK_PRIV_CREATE, "Create"), DROP(HiveParser.TOK_PRIV_DROP, "Drop"), - INDEX(HiveParser.TOK_PRIV_INDEX, "Index"), LOCK(HiveParser.TOK_PRIV_LOCK, "Lock"), SELECT(HiveParser.TOK_PRIV_SELECT, "Select"), SHOW_DATABASE(HiveParser.TOK_PRIV_SHOW_DATABASE, "Show_Database"), diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java index d2f1716b44..b66d1881b0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hive.ql.security.authorization; -import java.io.FileNotFoundException; import java.io.IOException; import java.security.AccessControlException; import java.util.ArrayList; @@ -293,9 +292,6 @@ protected FsAction getFsAction(Privilege priv) { return FsAction.WRITE; case DROP: return FsAction.WRITE; - case INDEX: - throw new AuthorizationException( - "StorageBasedAuthorizationProvider cannot handle INDEX privilege"); case LOCK: throw new AuthorizationException( "StorageBasedAuthorizationProvider cannot handle LOCK privilege"); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/AbstractGenericUDFEWAHBitmapBop.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/AbstractGenericUDFEWAHBitmapBop.java deleted file mode 100644 index aa5be09373..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/AbstractGenericUDFEWAHBitmapBop.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
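// --- Illustration only, not part of the patch. ---
// The OpProcFactory hunk above drops the check on hive.optimize.index.filter:
// the pushed-down predicate is now attached to the table scan descriptor
// unconditionally instead of only when index filtering was enabled. Minimal
// standalone sketch of that control-flow change; ScanDesc and Expr are
// hypothetical stand-ins, not Hive classes.
final class PpdSketch {
    static final class Expr {
        final String text;
        Expr(String text) { this.text = text; }
    }
    static final class ScanDesc {
        private Expr filterExpr;
        void setFilterExpr(Expr e) { this.filterExpr = e; }
        Expr getFilterExpr() { return filterExpr; }
    }

    // Old behaviour (sketch): only record the predicate when the index-filter
    // optimization was switched on.
    static void attachOld(ScanDesc desc, Expr predicate, boolean indexFilterEnabled) {
        if (indexFilterEnabled) {
            desc.setFilterExpr(predicate);
        }
    }

    // New behaviour (sketch): always record it; downstream consumers such as
    // storage-handler predicate pushdown read it from the descriptor.
    static void attachNew(ScanDesc desc, Expr predicate) {
        desc.setFilterExpr(predicate);
    }

    public static void main(String[] args) {
        ScanDesc oldDesc = new ScanDesc();
        attachOld(oldDesc, new Expr("key > 80 AND key < 100"), false);
        ScanDesc newDesc = new ScanDesc();
        attachNew(newDesc, new Expr("key > 80 AND key < 100"));
        System.out.println("old: " + (oldDesc.getFilterExpr() == null ? "no filter" : oldDesc.getFilterExpr().text));
        System.out.println("new: " + newDesc.getFilterExpr().text);
    }
}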
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.udf.generic; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import javaewah.EWAHCompressedBitmap; - -import org.apache.hadoop.hive.ql.exec.UDFArgumentException; -import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; -import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.index.bitmap.BitmapObjectInput; -import org.apache.hadoop.hive.ql.index.bitmap.BitmapObjectOutput; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; -import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; -import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils; -import org.apache.hadoop.io.LongWritable; - -/** - * An abstract class for a UDF that performs a binary operation between two EWAH-compressed bitmaps. - * For example: Bitmap OR and AND operations between two EWAH-compressed bitmaps. 
- */ -abstract public class AbstractGenericUDFEWAHBitmapBop extends GenericUDF { - protected final ArrayList ret = new ArrayList(); - private transient ObjectInspector b1OI; - private final String name; - - AbstractGenericUDFEWAHBitmapBop(String name) { - this.name = name; - } - - @Override - public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { - if (arguments.length != 2) { - throw new UDFArgumentLengthException( - "The function " + name + "(b1, b2) takes exactly 2 arguments"); - } - - if (arguments[0].getCategory().equals(Category.LIST)) { - b1OI = (ListObjectInspector) arguments[0]; - } else { - throw new UDFArgumentTypeException(0, "\"" - + Category.LIST.toString().toLowerCase() - + "\" is expected at function " + name + ", but \"" - + arguments[0].getTypeName() + "\" is found"); - } - - if (!arguments[1].getCategory().equals(Category.LIST)) { - throw new UDFArgumentTypeException(1, "\"" - + Category.LIST.toString().toLowerCase() - + "\" is expected at function " + name + ", but \"" - + arguments[1].getTypeName() + "\" is found"); - - } - return ObjectInspectorFactory - .getStandardListObjectInspector(PrimitiveObjectInspectorFactory - .writableLongObjectInspector); - } - - protected abstract EWAHCompressedBitmap bitmapBop( - EWAHCompressedBitmap bitmap1, EWAHCompressedBitmap bitmap2); - - @Override - public Object evaluate(DeferredObject[] arguments) throws HiveException { - assert (arguments.length == 2); - Object b1 = arguments[0].get(); - Object b2 = arguments[1].get(); - - EWAHCompressedBitmap bitmap1 = wordArrayToBitmap(b1); - EWAHCompressedBitmap bitmap2 = wordArrayToBitmap(b2); - - EWAHCompressedBitmap bitmapAnd = bitmapBop(bitmap1, bitmap2); - - BitmapObjectOutput bitmapObjOut = new BitmapObjectOutput(); - try { - bitmapAnd.writeExternal(bitmapObjOut); - } catch (IOException e) { - throw new RuntimeException(e); - } - ret.clear(); - List retList = bitmapToWordArray(bitmapAnd); - for (LongWritable l : retList) { - ret.add(l); - } - return ret; - } - - protected EWAHCompressedBitmap wordArrayToBitmap(Object b) { - ListObjectInspector lloi = (ListObjectInspector) b1OI; - int length = lloi.getListLength(b); - ArrayList bitmapArray = new ArrayList(); - for (int i = 0; i < length; i++) { - long l = PrimitiveObjectInspectorUtils.getLong( - lloi.getListElement(b, i), - (PrimitiveObjectInspector) lloi.getListElementObjectInspector()); - bitmapArray.add(new LongWritable(l)); - } - - BitmapObjectInput bitmapObjIn = new BitmapObjectInput(bitmapArray); - EWAHCompressedBitmap bitmap = new EWAHCompressedBitmap(); - try { - bitmap.readExternal(bitmapObjIn); - } catch (IOException e) { - throw new RuntimeException(e); - } - return bitmap; - } - - protected List bitmapToWordArray(EWAHCompressedBitmap bitmap) { - BitmapObjectOutput bitmapObjOut = new BitmapObjectOutput(); - try { - bitmap.writeExternal(bitmapObjOut); - } catch (IOException e) { - throw new RuntimeException(e); - } - return bitmapObjOut.list(); - } - - @Override - public String getDisplayString(String[] children) { - return getStandardDisplayString(name, children, ","); - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEWAHBitmap.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEWAHBitmap.java deleted file mode 100644 index fabeeccef2..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEWAHBitmap.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor 
license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.ql.udf.generic; - -import java.io.IOException; -import java.util.ArrayList; - -import javaewah.EWAHCompressedBitmap; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hive.ql.exec.Description; -import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.index.bitmap.BitmapObjectInput; -import org.apache.hadoop.hive.ql.index.bitmap.BitmapObjectOutput; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; -import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.StandardListObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; -import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.util.StringUtils; - - -/** - * GenericUDAFEWAHBitmap. - * - */ -@Description(name = "ewah_bitmap", value = "_FUNC_(expr) - Returns an EWAH-compressed bitmap representation of a column.") -public class GenericUDAFEWAHBitmap extends AbstractGenericUDAFResolver { - - static final Logger LOG = LoggerFactory.getLogger(GenericUDAFEWAHBitmap.class.getName()); - - @Override - public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) - throws SemanticException { - if (parameters.length != 1) { - throw new UDFArgumentTypeException(parameters.length - 1, - "Exactly one argument is expected."); - } - ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(parameters[0]); - if (!ObjectInspectorUtils.compareSupported(oi)) { - throw new UDFArgumentTypeException(parameters.length - 1, - "Cannot support comparison of map<> type or complex type containing map<>."); - } - return new GenericUDAFEWAHBitmapEvaluator(); - } - - //The UDAF evaluator assumes that all rows it's evaluating have - //the same (desired) value. 
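// --- Illustration only, not part of the patch. ---
// The evaluator class below follows the usual Hive UDAF shape: iterate() folds
// raw rows into a per-task buffer, terminatePartial() serializes that buffer,
// merge() ORs a partial result into the buffer, and terminate() emits the final
// bitmap. Toy standalone sketch of that lifecycle; java.util.BitSet stands in
// for EWAHCompressedBitmap and the ObjectInspector plumbing is omitted.
import java.util.BitSet;

final class BitmapAggSketch {
    private final BitSet buffer = new BitSet();   // the aggregation buffer

    void iterate(int row) {                        // map side: fold one row in
        buffer.set(row);
    }

    long[] terminatePartial() {                    // ship the partial result
        return buffer.toLongArray();
    }

    void merge(long[] partial) {                   // reduce side: OR a partial in
        buffer.or(BitSet.valueOf(partial));
    }

    long[] terminate() {                           // final result
        return buffer.toLongArray();
    }

    public static void main(String[] args) {
        BitmapAggSketch mapper1 = new BitmapAggSketch();
        mapper1.iterate(1);
        mapper1.iterate(5);
        BitmapAggSketch mapper2 = new BitmapAggSketch();
        mapper2.iterate(3);

        BitmapAggSketch reducer = new BitmapAggSketch();
        reducer.merge(mapper1.terminatePartial());
        reducer.merge(mapper2.terminatePartial());
        System.out.println(BitSet.valueOf(reducer.terminate())); // {1, 3, 5}
    }
}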
- public static class GenericUDAFEWAHBitmapEvaluator extends GenericUDAFEvaluator { - - // For PARTIAL1 and COMPLETE: ObjectInspectors for original data - private PrimitiveObjectInspector inputOI; - - // For PARTIAL2 and FINAL: ObjectInspectors for partial aggregations - // (lists of bitmaps) - private transient StandardListObjectInspector loi; - private transient StandardListObjectInspector internalMergeOI; - - @Override - public ObjectInspector init(Mode m, ObjectInspector[] parameters) - throws HiveException { - super.init(m, parameters); - // init output object inspectors - // The output of a partial aggregation is a list - if (m == Mode.PARTIAL1) { - inputOI = (PrimitiveObjectInspector) parameters[0]; - return ObjectInspectorFactory - .getStandardListObjectInspector(PrimitiveObjectInspectorFactory.writableLongObjectInspector); - } else if (m == Mode.PARTIAL2 || m == Mode.FINAL) { - internalMergeOI = (StandardListObjectInspector) parameters[0]; - inputOI = (PrimitiveObjectInspector)internalMergeOI.getListElementObjectInspector(); - loi = (StandardListObjectInspector) ObjectInspectorFactory - .getStandardListObjectInspector(PrimitiveObjectInspectorFactory.writableLongObjectInspector); - return loi; - } else { // Mode.COMPLETE, ie. no map-side aggregation, requires ordering - inputOI = (PrimitiveObjectInspector)parameters[0]; - loi = (StandardListObjectInspector) ObjectInspectorFactory - .getStandardListObjectInspector(PrimitiveObjectInspectorFactory.writableLongObjectInspector); - return loi; - } - } - - /** class for storing the current partial result aggregation */ - @AggregationType(estimable = true) - static class BitmapAgg extends AbstractAggregationBuffer { - EWAHCompressedBitmap bitmap; - @Override - public int estimate() { - return bitmap.sizeInBytes(); - } - } - - @Override - public void reset(AggregationBuffer agg) throws HiveException { - - ((BitmapAgg) agg).bitmap = new EWAHCompressedBitmap(); - } - - @Override - public AggregationBuffer getNewAggregationBuffer() throws HiveException { - BitmapAgg result = new BitmapAgg(); - reset(result); - return result; - } - - @Override - public void iterate(AggregationBuffer agg, Object[] parameters) - throws HiveException { - assert (parameters.length == 1); - Object p = parameters[0]; - if (p != null) { - BitmapAgg myagg = (BitmapAgg) agg; - try { - int row = PrimitiveObjectInspectorUtils.getInt(p, inputOI); - addBitmap(row, myagg); - } catch (NumberFormatException e) { - LOG.warn(getClass().getSimpleName() + " " + - StringUtils.stringifyException(e)); - } - } - } - - - @Override - public Object terminate(AggregationBuffer agg) throws HiveException { - BitmapAgg myagg = (BitmapAgg) agg; - - BitmapObjectOutput bitmapObjOut = new BitmapObjectOutput(); - try { - myagg.bitmap.writeExternal(bitmapObjOut); - } catch (IOException e) { - throw new RuntimeException(e); - } - return bitmapObjOut.list(); - } - - @Override - public void merge(AggregationBuffer agg, Object partial) - throws HiveException { - BitmapAgg myagg = (BitmapAgg) agg; - ArrayList partialResult = (ArrayList) internalMergeOI.getList(partial); - BitmapObjectInput bitmapObjIn = new BitmapObjectInput(partialResult); - EWAHCompressedBitmap partialBitmap = new EWAHCompressedBitmap(); - try { - partialBitmap.readExternal(bitmapObjIn); - } catch (IOException e) { - throw new RuntimeException(e); - } - myagg.bitmap = myagg.bitmap.or(partialBitmap); - } - - @Override - public Object terminatePartial(AggregationBuffer agg) throws HiveException { - BitmapAgg myagg = (BitmapAgg) agg; - 
BitmapObjectOutput bitmapObjOut = new BitmapObjectOutput(); - try { - myagg.bitmap.writeExternal(bitmapObjOut); - } catch (IOException e) { - throw new RuntimeException(e); - } - return bitmapObjOut.list(); - } - - private void addBitmap(int newRow, BitmapAgg myagg) { - if (!myagg.bitmap.set(newRow)) { - throw new RuntimeException("Can't set bits out of order with EWAHCompressedBitmap"); - } - } - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapAnd.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapAnd.java deleted file mode 100644 index 976fa18460..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapAnd.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.udf.generic; - -import javaewah.EWAHCompressedBitmap; - -import org.apache.hadoop.hive.ql.exec.Description; - -/** - * GenericEWAHUDFBitmapAnd. - * - */ -@Description(name = "ewah_bitmap_and", - value = "_FUNC_(b1, b2) - Return an EWAH-compressed bitmap that is the bitwise AND of two bitmaps.") -public class GenericUDFEWAHBitmapAnd extends AbstractGenericUDFEWAHBitmapBop { - - public GenericUDFEWAHBitmapAnd() { - super("EWAH_BITMAP_AND"); - } - - @Override - protected EWAHCompressedBitmap bitmapBop( - EWAHCompressedBitmap bitmap1, EWAHCompressedBitmap bitmap2) { - return bitmap1.and(bitmap2); - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapEmpty.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapEmpty.java deleted file mode 100644 index aab6e82373..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapEmpty.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
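// --- Illustration only, not part of the patch. ---
// The deleted EWAH bitmap UDFs (the AND wrapper above, plus the OR wrapper and
// the emptiness predicate in the files that follow) are thin wrappers around
// the javaewah library. Minimal usage sketch of the operations they exposed,
// using only calls that appear in the deleted sources; the import matches the
// package used there (newer javaewah releases relocate the class to
// com.googlecode.javaewah, so adjust for the version on your classpath).
import javaewah.EWAHCompressedBitmap;

final class EwahSketch {
    public static void main(String[] args) {
        EWAHCompressedBitmap b1 = new EWAHCompressedBitmap();
        EWAHCompressedBitmap b2 = new EWAHCompressedBitmap();
        // set() must be called with strictly increasing positions -- the deleted
        // UDAF throws when set() returns false for an out-of-order row id.
        b1.set(1); b1.set(5); b1.set(9);
        b2.set(5); b2.set(9); b2.set(12);

        EWAHCompressedBitmap and = b1.and(b2);   // rows present in both bitmaps
        EWAHCompressedBitmap or  = b1.or(b2);    // rows present in either bitmap

        // ewah_bitmap_empty tested "no bit set" exactly like this:
        boolean andEmpty = !and.iterator().hasNext();
        System.out.println("AND empty? " + andEmpty + ", OR size in bytes: " + or.sizeInBytes());
    }
}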
- */ - -package org.apache.hadoop.hive.ql.udf.generic; - -import java.io.IOException; -import java.util.ArrayList; - -import javaewah.EWAHCompressedBitmap; - -import org.apache.hadoop.hive.ql.exec.Description; -import org.apache.hadoop.hive.ql.exec.UDFArgumentException; -import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; -import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.index.bitmap.BitmapObjectInput; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category; -import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.primitive.BooleanObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; -import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils; -import org.apache.hadoop.io.BooleanWritable; -import org.apache.hadoop.io.LongWritable; - -@Description(name = "ewah_bitmap_empty", value = "_FUNC_(bitmap) - " - + "Predicate that tests whether an EWAH-compressed bitmap is all zeros ") -public class GenericUDFEWAHBitmapEmpty extends GenericUDF { - private transient ObjectInspector bitmapOI; - private transient BooleanObjectInspector boolOI; - -@Override -public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { - if (arguments.length != 1) { - throw new UDFArgumentLengthException( - "The function EWAH_BITMAP_EMPTY(b) takes exactly 1 argument"); - } - - if (arguments[0].getCategory().equals(Category.LIST)) { - bitmapOI = (ListObjectInspector) arguments[0]; - } else { - throw new UDFArgumentTypeException(0, "\"" - + Category.LIST.toString().toLowerCase() - + "\" is expected at function EWAH_BITMAP_EMPTY, but \"" - + arguments[0].getTypeName() + "\" is found"); - } - - boolOI = PrimitiveObjectInspectorFactory.writableBooleanObjectInspector; - return boolOI; - } - - @Override - public Object evaluate(DeferredObject[] arguments) throws HiveException { - assert (arguments.length == 1); - Object b = arguments[0].get(); - - ListObjectInspector lloi = (ListObjectInspector) bitmapOI; - int length = lloi.getListLength(b); - ArrayList bitmapArray = new ArrayList(); - for (int i = 0; i < length; i++) { - long l = PrimitiveObjectInspectorUtils.getLong( - lloi.getListElement(b, i), - (PrimitiveObjectInspector) lloi.getListElementObjectInspector()); - bitmapArray.add(new LongWritable(l)); - } - - BitmapObjectInput bitmapObjIn = new BitmapObjectInput(bitmapArray); - EWAHCompressedBitmap bitmap = new EWAHCompressedBitmap(); - try { - bitmap.readExternal(bitmapObjIn); - } catch (IOException e) { - throw new RuntimeException(e); - } - - // Add return true only if bitmap is all zeros. 
- return new BooleanWritable(!bitmap.iterator().hasNext()); - } - - - @Override - public String getDisplayString(String[] children) { - return getStandardDisplayString("EWAH_BITMAP_EMPTY", children); - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapOr.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapOr.java deleted file mode 100644 index 33d6be6ec6..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapOr.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.udf.generic; - -import javaewah.EWAHCompressedBitmap; - -import org.apache.hadoop.hive.ql.exec.Description; - -/** - * GenericUDFEWAHBitmapOr. - * - */ -@Description(name = "ewah_bitmap_or", - value = "_FUNC_(b1, b2) - Return an EWAH-compressed bitmap that is the bitwise OR of two bitmaps.") -public class GenericUDFEWAHBitmapOr extends AbstractGenericUDFEWAHBitmapBop { - - public GenericUDFEWAHBitmapOr() { - super("EWAH_BITMAP_OR"); - } - - @Override - protected EWAHCompressedBitmap bitmapBop( - EWAHCompressedBitmap bitmap1, EWAHCompressedBitmap bitmap2) { - return bitmap1.or(bitmap2); - } -} diff --git ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java index 048215aa37..d0a1768ea8 100644 --- ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java +++ ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java @@ -190,8 +190,6 @@ public void testOrcNoPPD() throws Exception { * @throws Exception */ private void testOrcPPD(boolean enablePPD) throws Exception { - boolean originalPpd = hiveConf.getBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER, enablePPD);//enables ORC PPD //create delta_0001_0001_0000 (should push predicate here) runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(new int[][]{{1, 2}, {3, 4}})); List explain; @@ -256,7 +254,6 @@ private void testOrcPPD(boolean enablePPD) throws Exception { List rs1 = runStatementOnDriver(query); int [][] resultData = new int[][] {{3, 5}, {5, 6}, {9, 10}}; Assert.assertEquals("Update failed", stringifyValues(resultData), rs1); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER, originalPpd); } static void assertExplainHasString(String string, List queryPlan, String errMsg) { @@ -1414,7 +1411,6 @@ protected void testACIDwithSchemaEvolutionForVariousTblProperties(String tblProp @Test public void testETLSplitStrategyForACID() throws Exception { hiveConf.setVar(HiveConf.ConfVars.HIVE_ORC_SPLIT_STRATEGY, "ETL"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER, true); runStatementOnDriver("insert into " + 
Table.ACIDTBL + " values(1,2)"); runStatementOnDriver("alter table " + Table.ACIDTBL + " compact 'MAJOR'"); runWorker(hiveConf); diff --git ql/src/test/org/apache/hadoop/hive/ql/index/MockIndexResult.java ql/src/test/org/apache/hadoop/hive/ql/index/MockIndexResult.java deleted file mode 100644 index 808cb6a0d1..0000000000 --- ql/src/test/org/apache/hadoop/hive/ql/index/MockIndexResult.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.ql.index; - -import com.google.common.collect.ImmutableSet; -import java.util.Collection; -import org.apache.hadoop.hive.ql.io.HiveInputFormat.HiveInputSplit; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.mapred.FileSplit; - -public final class MockIndexResult implements IndexResult { - - private final ImmutableSet selectedSplits; - - public MockIndexResult(Collection selectedSplits) { - this.selectedSplits = ImmutableSet.copyOf(selectedSplits); - } - - @Override - public boolean contains(FileSplit split) throws HiveException { - return selectedSplits.contains(split); - } -} \ No newline at end of file diff --git ql/src/test/org/apache/hadoop/hive/ql/index/MockInputFile.java ql/src/test/org/apache/hadoop/hive/ql/index/MockInputFile.java index 4804e366e6..405efdfcf9 100644 --- ql/src/test/org/apache/hadoop/hive/ql/index/MockInputFile.java +++ ql/src/test/org/apache/hadoop/hive/ql/index/MockInputFile.java @@ -74,7 +74,8 @@ public static PathStep builder() { DefaultSplitLengthStep { private String path; - private long defaultSplitSize = SplitFilterTestCase.DEFAULT_SPLIT_SIZE;; + public static final long DEFAULT_SPLIT_SIZE = 1024 * 1024; + private long defaultSplitSize = DEFAULT_SPLIT_SIZE; private final List splits = new ArrayList<>(); private final List selectedSplits = new ArrayList<>(); private long position = 0; diff --git ql/src/test/org/apache/hadoop/hive/ql/index/SplitFilterTestCase.java ql/src/test/org/apache/hadoop/hive/ql/index/SplitFilterTestCase.java deleted file mode 100644 index fdd07313e2..0000000000 --- ql/src/test/org/apache/hadoop/hive/ql/index/SplitFilterTestCase.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
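// --- Illustration only, not part of the patch. ---
// MockIndexResult above reduces the index lookup to a set-membership test: the
// filter under test keeps the splits the index selected and fails once their
// total size exceeds a configured cap (the TestSplitFilter case further below
// that expects an IOException). Simplified standalone sketch of that contract;
// Split is a hypothetical stand-in, and the sync-interval widening encoded in
// the expected values of the deleted tests is not reproduced here.
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

final class SplitFilterSketch {
    static final class Split {
        final String path; final long start; final long length;
        Split(String path, long start, long length) {
            this.path = path; this.start = start; this.length = length;
        }
    }

    static List<Split> filter(List<Split> all, Set<Split> selected, long maxInputSize)
            throws IOException {
        List<Split> kept = new ArrayList<>();
        long total = 0;
        for (Split s : all) {
            if (!selected.contains(s)) {
                continue;                       // the index did not select this split
            }
            total += s.length;
            if (total > maxInputSize) {         // mirror of the max-input-size guard
                throw new IOException("Selected input " + total
                    + " exceeds limit " + maxInputSize);
            }
            kept.add(s);
        }
        return kept;
    }

    public static void main(String[] args) throws IOException {
        Split a0 = new Split("A", 0, 1024);
        Split a1 = new Split("A", 1024, 1024);
        System.out.println(filter(List.of(a0, a1), Set.of(a1), 4096).size()); // 1
    }
}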
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.ql.index; - -import com.google.common.collect.ImmutableSet; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import org.apache.hadoop.hive.ql.io.HiveInputFormat.HiveInputSplit; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - -public final class SplitFilterTestCase { - public static final long DEFAULT_SPLIT_SIZE = 1024 * 1024; - public static final long SMALL_SPLIT_SIZE = 500; - - private final Set allSplits; - private final Set selectedSplits; - private final Set expectedSplits; - private final long maxInputSize; - - private SplitFilterTestCase(Iterable allSplits, - Iterable selectedSplits, Iterable expectedSplits, - long maxInputSize) { - - this.allSplits = ImmutableSet.copyOf(allSplits); - this.selectedSplits = ImmutableSet.copyOf(selectedSplits); - this.expectedSplits = ImmutableSet.copyOf(expectedSplits); - this.maxInputSize = maxInputSize; - } - - private HiveInputSplit[] toArray(Collection splits) { - return splits.toArray(new HiveInputSplit[splits.size()]); - } - - public void executeAndValidate() throws IOException { - SplitFilter filter = new SplitFilter(new MockIndexResult(selectedSplits), maxInputSize); - List actualSplits = filter.filter(toArray(allSplits)); - assertSplits(expectedSplits, actualSplits); - } - - private void assertSplits(Collection expectedSplits, - Collection actualSplits) { - SplitFilter.HiveInputSplitComparator hiveInputSplitComparator = - new SplitFilter.HiveInputSplitComparator(); - - List sortedExpectedSplits = new ArrayList<>(expectedSplits); - Collections.sort(sortedExpectedSplits, hiveInputSplitComparator); - - List sortedActualSplits = new ArrayList<>(actualSplits); - Collections.sort(sortedActualSplits, hiveInputSplitComparator); - - assertEquals("Number of selected splits.", sortedExpectedSplits.size(), - sortedActualSplits.size()); - - for (int i = 0; i < sortedExpectedSplits.size(); i++) { - HiveInputSplit expectedSplit = sortedExpectedSplits.get(i); - HiveInputSplit actualSplit = sortedActualSplits.get(i); - - String splitName = "Split #" + i; - - assertEquals(splitName + " path.", expectedSplit.getPath(), actualSplit.getPath()); - assertEquals(splitName + " start.", expectedSplit.getStart(), actualSplit.getStart()); - assertEquals(splitName + " length.", expectedSplit.getLength(), actualSplit.getLength()); - } - } - - public static MaxInputSizeStep builder() { - return new SplitFilterTestCaseBuilder(); - } - - public static interface MaxInputSizeStep extends InputFilesStep { - InputFilesStep maxInputSize(long maxInputSize); - } - - public static interface InputFilesStep { - ExpectedSplitsStep inputFiles(MockInputFile... inputFiles); - } - - public static interface ExpectedSplitsStep { - BuildStep expectedSplits(HiveInputSplit... 
expectedSplits); - } - - public static interface BuildStep { - SplitFilterTestCase build(); - } - - private static final class SplitFilterTestCaseBuilder implements MaxInputSizeStep, InputFilesStep, - ExpectedSplitsStep, BuildStep { - - private long maxInputSize = Long.MAX_VALUE; - private List inputFiles; - private List expectedSplits; - - @Override - public InputFilesStep maxInputSize(long maxInputSize) { - this.maxInputSize = maxInputSize; - return this; - } - - @Override - public ExpectedSplitsStep inputFiles(MockInputFile... inputFiles) { - this.inputFiles = Arrays.asList(inputFiles); - return this; - } - - @Override - public BuildStep expectedSplits(HiveInputSplit... expectedSplits) { - this.expectedSplits = Arrays.asList(expectedSplits); - return this; - } - - @Override - public SplitFilterTestCase build() { - List allSplits = new ArrayList<>(); - List selectedSplits = new ArrayList<>(); - Set seenPaths = new HashSet(); - - for (MockInputFile inputFile : inputFiles) { - if (seenPaths.add(inputFile.getPath())) { - allSplits.addAll(inputFile.getSplits()); - selectedSplits.addAll(inputFile.getSelectedSplits()); - } else { - fail(String.format("Cannot add 2 input files with the same path to a test case. " + - "The duplicated path is '%s'.", inputFile.getPath())); - } - } - - return new SplitFilterTestCase(allSplits, selectedSplits, expectedSplits, maxInputSize); - } - } -} diff --git ql/src/test/org/apache/hadoop/hive/ql/index/TestHiveInputSplitComparator.java ql/src/test/org/apache/hadoop/hive/ql/index/TestHiveInputSplitComparator.java index 3fc18e9ae1..feb5ea92d1 100644 --- ql/src/test/org/apache/hadoop/hive/ql/index/TestHiveInputSplitComparator.java +++ ql/src/test/org/apache/hadoop/hive/ql/index/TestHiveInputSplitComparator.java @@ -18,8 +18,9 @@ package org.apache.hadoop.hive.ql.index; import java.util.Arrays; + import org.apache.hadoop.hive.ql.io.HiveInputFormat.HiveInputSplit; -import org.apache.hadoop.hive.ql.index.SplitFilter.HiveInputSplitComparator; +import org.apache.hadoop.hive.ql.io.HiveInputFormat.HiveInputSplitComparator; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; diff --git ql/src/test/org/apache/hadoop/hive/ql/index/TestIndexType.java ql/src/test/org/apache/hadoop/hive/ql/index/TestIndexType.java deleted file mode 100644 index befb10337d..0000000000 --- ql/src/test/org/apache/hadoop/hive/ql/index/TestIndexType.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
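// --- Illustration only, not part of the patch. ---
// The deleted SplitFilterTestCase uses a staged ("step") builder: builder()
// returns an interface that exposes only the next legal call, so
// builder().maxInputSize(..).inputFiles(..).expectedSplits(..).build() is the
// one order the compiler accepts. Minimal standalone sketch of the idiom with
// hypothetical Request/step names.
final class StagedBuilderSketch {
    interface HostStep { PortStep host(String host); }
    interface PortStep { BuildStep port(int port); }
    interface BuildStep { Request build(); }

    static final class Request {
        final String host; final int port;
        private Request(String host, int port) { this.host = host; this.port = port; }
    }

    // One concrete builder implements every step; each setter returns "this"
    // typed as the next step, which is what enforces the call order.
    private static final class Builder implements HostStep, PortStep, BuildStep {
        private String host;
        private int port;
        @Override public PortStep host(String host) { this.host = host; return this; }
        @Override public BuildStep port(int port) { this.port = port; return this; }
        @Override public Request build() { return new Request(host, port); }
    }

    static HostStep builder() { return new Builder(); }

    public static void main(String[] args) {
        Request r = builder().host("example.org").port(8080).build();
        System.out.println(r.host + ":" + r.port);
    }
}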
- */ -package org.apache.hadoop.hive.ql.index; - -import junit.framework.TestCase; -import org.apache.hadoop.hive.ql.index.bitmap.BitmapIndexHandler; -import org.apache.hadoop.hive.ql.index.compact.CompactIndexHandler; -import org.junit.Test; - -public class TestIndexType extends TestCase { - - @Test - public void testIndexTypeHandlers(){ - assertEquals(HiveIndex.IndexType.AGGREGATE_TABLE.getHandlerClsName(), AggregateIndexHandler.class.getName()); - assertEquals(HiveIndex.IndexType.BITMAP_TABLE.getHandlerClsName(), BitmapIndexHandler.class.getName()); - assertEquals(HiveIndex.IndexType.COMPACT_SUMMARY_TABLE.getHandlerClsName(), CompactIndexHandler.class.getName()); - } - -} diff --git ql/src/test/org/apache/hadoop/hive/ql/index/TestSplitFilter.java ql/src/test/org/apache/hadoop/hive/ql/index/TestSplitFilter.java deleted file mode 100644 index b5114e9904..0000000000 --- ql/src/test/org/apache/hadoop/hive/ql/index/TestSplitFilter.java +++ /dev/null @@ -1,296 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hive.ql.index; - -import java.io.IOException; -import org.junit.Test; - -import static org.apache.hadoop.hive.ql.index.MockHiveInputSplits.createMockSplit; -import static org.apache.hadoop.io.SequenceFile.SYNC_INTERVAL; -import static org.apache.hadoop.hive.ql.index.SplitFilterTestCase.DEFAULT_SPLIT_SIZE; -import static org.apache.hadoop.hive.ql.index.SplitFilterTestCase.SMALL_SPLIT_SIZE; - -public class TestSplitFilter { - private SplitFilterTestCase testCase; - - @Test - public void testOneSelectedSplitsInMiddle() throws Exception { - testCase = SplitFilterTestCase.builder() - .inputFiles( - MockInputFile.builder() - .path("A") - .split() - .selectedSplit() - .split() - .build() - ) - .expectedSplits( - createMockSplit("A", DEFAULT_SPLIT_SIZE - SYNC_INTERVAL, DEFAULT_SPLIT_SIZE + SYNC_INTERVAL) - ) - .build(); - - testCase.executeAndValidate(); - } - - @Test - public void testSelectedFirstSplit() throws Exception { - testCase = SplitFilterTestCase.builder() - .inputFiles( - MockInputFile.builder() - .path("A") - .selectedSplit() - .split() - .split() - .build() - ) - .expectedSplits( - createMockSplit("A", 0, DEFAULT_SPLIT_SIZE) - ) - .build(); - - testCase.executeAndValidate(); - } - - @Test - public void testSelectedLastSplit() throws Exception { - int lastSplitSize = 1234; - - testCase = SplitFilterTestCase.builder() - .inputFiles( - MockInputFile.builder() - .path("A") - .split() - .selectedSplit(lastSplitSize) - .build() - ) - .expectedSplits( - createMockSplit("A", DEFAULT_SPLIT_SIZE - SYNC_INTERVAL, lastSplitSize + SYNC_INTERVAL) - ) - .build(); - - testCase.executeAndValidate(); - } - - @Test - public void testSelectedTwoAdjacentSplits() throws Exception { - - testCase = SplitFilterTestCase.builder() - .inputFiles( - MockInputFile.builder() - .path("A") - .selectedSplit() - .selectedSplit() - .split() - .build() - ) - .expectedSplits( - createMockSplit("A", 0, DEFAULT_SPLIT_SIZE), - createMockSplit("A", DEFAULT_SPLIT_SIZE, DEFAULT_SPLIT_SIZE) - ) - .build(); - - testCase.executeAndValidate(); - } - - @Test - public void testSelectedThreeAdjacentSplits() throws Exception { - - testCase = SplitFilterTestCase.builder() - .inputFiles( - MockInputFile.builder() - .path("A") - .selectedSplit() - .selectedSplit() - .selectedSplit() - .split() - .build() - ) - .expectedSplits( - createMockSplit("A", 0, DEFAULT_SPLIT_SIZE), - createMockSplit("A", DEFAULT_SPLIT_SIZE, DEFAULT_SPLIT_SIZE), - createMockSplit("A", DEFAULT_SPLIT_SIZE * 2, DEFAULT_SPLIT_SIZE) - ) - .build(); - - testCase.executeAndValidate(); - } - - @Test - public void testSelectedSplitsInTwoFiles() throws Exception { - - testCase = SplitFilterTestCase.builder() - .inputFiles( - MockInputFile.builder() - .path("A") - .selectedSplit() - .split() - .build(), - MockInputFile.builder() - .path("B") - .selectedSplit() - .build() - ) - .expectedSplits( - createMockSplit("A", 0, DEFAULT_SPLIT_SIZE), - createMockSplit("B", 0, DEFAULT_SPLIT_SIZE) - ) - .build(); - - testCase.executeAndValidate(); - } - - @Test - public void testOverlapWithPreviousFile() throws Exception { - - testCase = SplitFilterTestCase.builder() - .inputFiles( - MockInputFile.builder() - .path("A") - .selectedSplit() - .build(), - MockInputFile.builder() - .path("B") - .split() - .selectedSplit() - .build() - ) - .expectedSplits( - createMockSplit("A", 0, DEFAULT_SPLIT_SIZE), - createMockSplit("B", DEFAULT_SPLIT_SIZE - SYNC_INTERVAL, DEFAULT_SPLIT_SIZE + SYNC_INTERVAL) - ) - .build(); - - testCase.executeAndValidate(); - } - - @Test - 
public void testOverlapInSecondFile() throws Exception { - - testCase = SplitFilterTestCase.builder() - .inputFiles( - MockInputFile.builder() - .path("A") - .selectedSplit() - .build(), - MockInputFile.builder() - .path("B") - .split() - .selectedSplit() - .selectedSplit() - .build() - ) - .expectedSplits( - createMockSplit("A", 0, DEFAULT_SPLIT_SIZE), - createMockSplit("B", DEFAULT_SPLIT_SIZE - SYNC_INTERVAL, DEFAULT_SPLIT_SIZE + SYNC_INTERVAL), - createMockSplit("B", DEFAULT_SPLIT_SIZE * 2, DEFAULT_SPLIT_SIZE) - ) - .build(); - - testCase.executeAndValidate(); - } - - @Test - public void testSmallSplitsLengthAdjustment() throws Exception { - - testCase = SplitFilterTestCase.builder() - .inputFiles( - MockInputFile.builder() - .path("A") - .defaultSplitLength(SMALL_SPLIT_SIZE) - .split() - .selectedSplit() - .build() - ) - .expectedSplits( - createMockSplit("A", 0, SMALL_SPLIT_SIZE * 2) - ) - .build(); - - testCase.executeAndValidate(); - } - - @Test - public void testSmallSplitsOverlap() throws Exception { - - testCase = SplitFilterTestCase.builder() - .inputFiles( - MockInputFile.builder() - .path("A") - .defaultSplitLength(SMALL_SPLIT_SIZE) - .selectedSplit() - .split() - .selectedSplit() - .split() - .selectedSplit() - .build() - ) - .expectedSplits( - createMockSplit("A", 0, SMALL_SPLIT_SIZE), - createMockSplit("A", SMALL_SPLIT_SIZE * 2, SMALL_SPLIT_SIZE), - createMockSplit("A", SMALL_SPLIT_SIZE * 4, SMALL_SPLIT_SIZE) - ) - .build(); - - testCase.executeAndValidate(); - } - - @Test - public void testMaxSplitsSizePositive() throws Exception { - - testCase = SplitFilterTestCase.builder() - .maxInputSize(DEFAULT_SPLIT_SIZE * 3 + SYNC_INTERVAL * 2) - .inputFiles( - MockInputFile.builder() - .path("A") - .selectedSplit() - .split() - .selectedSplit() - .split() - .selectedSplit() - .build() - ) - .expectedSplits( - createMockSplit("A", 0, DEFAULT_SPLIT_SIZE), - createMockSplit("A", DEFAULT_SPLIT_SIZE * 2 - SYNC_INTERVAL, DEFAULT_SPLIT_SIZE + SYNC_INTERVAL), - createMockSplit("A", DEFAULT_SPLIT_SIZE * 4 - SYNC_INTERVAL, DEFAULT_SPLIT_SIZE + SYNC_INTERVAL) - ) - .build(); - - testCase.executeAndValidate(); - } - - @Test(expected = IOException.class) - public void testMaxSplitsSizeNegative() throws Exception { - testCase = SplitFilterTestCase.builder() - .maxInputSize(DEFAULT_SPLIT_SIZE * 3) - .inputFiles( - MockInputFile.builder() - .path("A") - .selectedSplit() - .split() - .selectedSplit() - .split() - .selectedSplit() - .build() - ) - .expectedSplits() - .build(); - - testCase.executeAndValidate(); - } -} diff --git ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java index f046191ae4..529aca17b2 100644 --- ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java +++ ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java @@ -449,7 +449,9 @@ public Cache getCache(HiveConf conf) throws IOException { for (int i = 0; i < metadatas.length; ++i) { long fileId = fileIds.get(i); ByteBuffer metadata = metadatas[i]; - if (metadata == null) continue; + if (metadata == null) { + continue; + } getHitByExprCount.incrementAndGet(); metadata = eliminated[i] ? 
null : metadata; MetadataPpdResult mpr = new MetadataPpdResult(); @@ -475,7 +477,9 @@ public void clearFileMetadata(List fileIds) throws HiveException { HashMap result = new HashMap<>(); for (Long id : fileIds) { MockItem mi = cache.get(id); - if (mi == null) continue; + if (mi == null) { + continue; + } getHitCount.incrementAndGet(); result.put(id, mi.data); } @@ -556,7 +560,9 @@ public FsWithHash(FileSplit fs) { } @Override public int hashCode() { - if (fs == null) return 0; + if (fs == null) { + return 0; + } final int prime = 31; int result = prime * 1 + fs.getPath().hashCode(); result = prime * result + Long.valueOf(fs.getStart()).hashCode(); @@ -565,11 +571,19 @@ public int hashCode() { @Override public boolean equals(Object obj) { - if (this == obj) return true; - if (!(obj instanceof FsWithHash)) return false; + if (this == obj) { + return true; + } + if (!(obj instanceof FsWithHash)) { + return false; + } FsWithHash other = (FsWithHash)obj; - if ((fs == null) != (other.fs == null)) return false; - if (fs == null && other.fs == null) return true; + if ((fs == null) != (other.fs == null)) { + return false; + } + if (fs == null && other.fs == null) { + return true; + } return fs.getStart() == other.fs.getStart() && fs.getLength() == other.fs.getLength() && fs.getPath().equals(other.fs.getPath()); } @@ -699,7 +713,6 @@ private void setupExternalCacheConfig(boolean isPpd, String paths) { conf.setLong(HiveConf.ConfVars.MAPREDMINSPLITSIZE.varname, 1000); conf.setLong(HiveConf.ConfVars.MAPREDMAXSPLITSIZE.varname, 5000); conf.setBoolean(ConfVars.HIVE_ORC_MS_FOOTER_CACHE_PPD.varname, isPpd); - conf.setBoolean(ConfVars.HIVEOPTINDEXFILTER.varname, isPpd); } private ObjectInspector createIO() { diff --git ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java index 5355e064f4..cd73dd1460 100755 --- ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java +++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java @@ -36,7 +36,6 @@ import org.apache.hadoop.hive.metastore.PartitionDropOptions; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.*; -import org.apache.hadoop.hive.ql.index.HiveIndex; import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.stats.StatsUtils; @@ -685,127 +684,6 @@ public void testPartition() throws Throwable { } } - /** - * Tests creating a simple index on a simple table. - * - * @throws Throwable - */ - public void testIndex() throws Throwable { - try{ - // create a simple table - String tableName = "table_for_testindex"; - String qTableName = Warehouse.DEFAULT_DATABASE_NAME + "." 
+ tableName;
- try {
- hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName);
- } catch (HiveException e) {
- e.printStackTrace();
- assertTrue("Unable to drop table", false);
- }
-
- Table tbl = new Table(Warehouse.DEFAULT_DATABASE_NAME, tableName);
- List fields = tbl.getCols();
-
- fields.add(new FieldSchema("col1", serdeConstants.INT_TYPE_NAME, "int -- first column"));
- fields.add(new FieldSchema("col2", serdeConstants.STRING_TYPE_NAME,
- "string -- second column"));
- fields.add(new FieldSchema("col3", serdeConstants.DOUBLE_TYPE_NAME,
- "double -- thrift column"));
- tbl.setFields(fields);
-
- tbl.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
- tbl.setInputFormatClass(SequenceFileInputFormat.class);
-
- // create table
- try {
- hm.createTable(tbl);
- } catch (HiveException e) {
- e.printStackTrace();
- assertTrue("Unable to create table: " + tableName, false);
- }
-
- // Create a simple index
- String indexName = "index_on_table_for_testindex";
- String indexHandlerClass = HiveIndex.IndexType.COMPACT_SUMMARY_TABLE.getHandlerClsName();
- List indexedCols = new ArrayList();
- indexedCols.add("col1");
- String indexTableName = "index_on_table_for_testindex_table";
- String qIndexTableName = Warehouse.DEFAULT_DATABASE_NAME + "." + indexTableName;
- boolean deferredRebuild = true;
- String inputFormat = SequenceFileInputFormat.class.getName();
- String outputFormat = SequenceFileOutputFormat.class.getName();
- String serde = null;
- String storageHandler = null;
- String location = null;
- String collItemDelim = null;
- String fieldDelim = null;
- String fieldEscape = null;
- String lineDelim = null;
- String mapKeyDelim = null;
- String indexComment = null;
- Map indexProps = null;
- Map tableProps = null;
- Map serdeProps = new HashMap();
- hm.createIndex(qTableName, indexName, indexHandlerClass, indexedCols, qIndexTableName,
- deferredRebuild, inputFormat, outputFormat, serde, storageHandler, location,
- indexProps, tableProps, serdeProps, collItemDelim, fieldDelim, fieldEscape, lineDelim,
- mapKeyDelim, indexComment);
-
- // Retrieve and validate the index
- Index index = null;
- try {
- index = hm.getIndex(tableName, indexName);
- assertNotNull("Unable to fetch index", index);
- index.validate();
- assertEquals("Index names don't match for index: " + indexName, indexName,
- index.getIndexName());
- assertEquals("Table names don't match for index: " + indexName, tableName,
- index.getOrigTableName());
- assertEquals("Index table names didn't match for index: " + indexName, indexTableName,
- index.getIndexTableName());
- assertEquals("Index handler classes didn't match for index: " + indexName,
- indexHandlerClass, index.getIndexHandlerClass());
- assertEquals("Deferred rebuild didn't match for index: " + indexName, deferredRebuild,
- index.isDeferredRebuild());
-
- } catch (HiveException e) {
- System.err.println(StringUtils.stringifyException(e));
- assertTrue("Unable to fetch index correctly: " + indexName, false);
- }
-
- // Drop index
- try {
- hm.dropIndex(Warehouse.DEFAULT_DATABASE_NAME, tableName, indexName, false, true);
- } catch (HiveException e) {
- System.err.println(StringUtils.stringifyException(e));
- assertTrue("Unable to drop index: " + indexName, false);
- }
-
- boolean dropIndexException = false;
- try {
- hm.getIndex(tableName, indexName);
- } catch (HiveException e) {
- // Expected since it was just dropped
- dropIndexException = true;
- }
-
- assertTrue("Unable to drop index: " + indexName, dropIndexException);
-
- // Drop table
- try {
- hm.dropTable(tableName);
- Table droppedTable = hm.getTable(tableName, false);
- assertNull("Unable to drop table " + tableName, droppedTable);
- } catch (HiveException e) {
- System.err.println(StringUtils.stringifyException(e));
- assertTrue("Unable to drop table: " + tableName, false);
- }
- } catch (Throwable e) {
- System.err.println(StringUtils.stringifyException(e));
- System.err.println("testIndex failed");
- throw e;
- }
- }
-
 public void testHiveRefreshOnConfChange() throws Throwable{
 Hive prevHiveObj = Hive.get();
 prevHiveObj.getDatabaseCurrent();
diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestPrivilegesV1.java ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestPrivilegesV1.java
index 87cd98ff40..4a33885d14 100644
--- ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestPrivilegesV1.java
+++ ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestPrivilegesV1.java
@@ -66,7 +66,6 @@ public void testPrivInGrant() throws Exception{
 grantUserTable("alter", PrivilegeType.ALTER_METADATA);
 grantUserTable("create", PrivilegeType.CREATE);
 grantUserTable("drop", PrivilegeType.DROP);
- grantUserTable("index", PrivilegeType.INDEX);
 grantUserTable("lock", PrivilegeType.LOCK);
 grantUserTable("select", PrivilegeType.SELECT);
 grantUserTable("show_database", PrivilegeType.SHOW_DATABASE);
diff --git ql/src/test/queries/clientnegative/index_bitmap_no_map_aggr.q ql/src/test/queries/clientnegative/index_bitmap_no_map_aggr.q
deleted file mode 100644
index a17cd1fec5..0000000000
--- ql/src/test/queries/clientnegative/index_bitmap_no_map_aggr.q
+++ /dev/null
@@ -1,7 +0,0 @@
-EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.map.aggr=false;
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
-ALTER INDEX src1_index ON src REBUILD;
diff --git ql/src/test/queries/clientnegative/index_compact_entry_limit.q ql/src/test/queries/clientnegative/index_compact_entry_limit.q
deleted file mode 100644
index 63973e699a..0000000000
--- ql/src/test/queries/clientnegative/index_compact_entry_limit.q
+++ /dev/null
@@ -1,13 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.stats.dbclass=fs;
-drop index src_index on src;
-
-CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" SELECT `_bucketname` , `_offsets` FROM default__src_src_index__ WHERE key<1000;
-SET hive.index.compact.file=${system:test.tmp.dir}/index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
-SET hive.index.compact.query.max.entries=5;
-SELECT key, value FROM src WHERE key=100 ORDER BY key;
diff --git ql/src/test/queries/clientnegative/index_compact_size_limit.q ql/src/test/queries/clientnegative/index_compact_size_limit.q
deleted file mode 100644
index ae4e265e51..0000000000
--- ql/src/test/queries/clientnegative/index_compact_size_limit.q
+++ /dev/null
@@ -1,14 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.stats.dbclass=fs;
-drop index src_index on src;
-
-CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" SELECT `_bucketname` , `_offsets` FROM default__src_src_index__ WHERE key<1000;
-SET hive.index.compact.file=${system:test.tmp.dir}/index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
-SET hive.index.compact.query.max.size=1024;
-SELECT key, value FROM src WHERE key=100 ORDER BY key;
-
diff --git ql/src/test/queries/clientpositive/index_auth.q ql/src/test/queries/clientpositive/index_auth.q
deleted file mode 100644
index b12b742613..0000000000
--- ql/src/test/queries/clientpositive/index_auth.q
+++ /dev/null
@@ -1,20 +0,0 @@
-set hive.stats.dbclass=fs;
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
-
-create table foobar(key int, value string) PARTITIONED BY (ds string, hr string);
-alter table foobar add partition (ds='2008-04-08',hr='12');
-
-CREATE INDEX srcpart_AUTH_index ON TABLE foobar(key) as 'BITMAP' WITH DEFERRED REBUILD;
-SHOW INDEXES ON foobar;
-
-grant select on table foobar to user hive_test_user;
-grant select on table default__foobar_srcpart_auth_indeX__ to user hive_test_user;
-grant update on table default__foobar_srcpart_auth_indEx__ to user hive_test_user;
-grant create on table default__foobar_srcpart_auth_inDex__ to user hive_test_user;
-set hive.security.authorization.enabled=true;
-
-ALTER INDEX srcpart_auth_INDEX ON foobar PARTITION (ds='2008-04-08',hr='12') REBUILD;
-set hive.security.authorization.enabled=false;
-DROP INDEX srcpart_auth_index on foobar;
-DROP TABLE foobar;
diff --git ql/src/test/queries/clientpositive/index_auto.q ql/src/test/queries/clientpositive/index_auto.q
deleted file mode 100644
index fe8839a130..0000000000
--- ql/src/test/queries/clientpositive/index_auto.q
+++ /dev/null
@@ -1,31 +0,0 @@
-set hive.mapred.mode=nonstrict;
--- try the query without indexing, with manual indexing, and with automatic indexing
--- SORT_QUERY_RESULTS
-
--- without indexing
-SELECT key, value FROM src WHERE key > 80 AND key < 100;
-
-set hive.stats.dbclass=fs;
-CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-
--- manual indexing
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_where" SELECT `_bucketname` , `_offsets` FROM default__src_src_index__ WHERE key > 80 AND key < 100;
-SET hive.index.compact.file=${system:test.tmp.dir}/index_where;
-SET hive.optimize.index.filter=false;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
-
-EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100;
-SELECT key, value FROM src WHERE key > 80 AND key < 100;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
--- automatic indexing
-EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100;
-SELECT key, value FROM src WHERE key > 80 AND key < 100;
-
-DROP INDEX src_index on src;
diff --git ql/src/test/queries/clientpositive/index_auto_empty.q ql/src/test/queries/clientpositive/index_auto_empty.q
deleted file mode 100644
index 7567887b74..0000000000
--- ql/src/test/queries/clientpositive/index_auto_empty.q
+++ /dev/null
@@ -1,26 +0,0 @@
-set hive.mapred.mode=nonstrict;
--- Test to ensure that an empty index result is propagated correctly
-
-CREATE DATABASE it;
--- Create 
temp, and populate it with some values in src. -CREATE TABLE it.temp(key STRING, val STRING) STORED AS TEXTFILE; - -set hive.stats.dbclass=fs; --- Build an index on it.temp. -CREATE INDEX temp_index ON TABLE it.temp(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX temp_index ON it.temp REBUILD; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=true; -SET hive.optimize.index.filter.compact.minsize=0; - --- query should not return any values -SELECT * FROM it.it__temp_temp_index__ WHERE key = 86; -EXPLAIN SELECT * FROM it.temp WHERE key = 86; -SELECT * FROM it.temp WHERE key = 86; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=false; -DROP table it.temp; - -DROP DATABASE it; diff --git ql/src/test/queries/clientpositive/index_auto_file_format.q ql/src/test/queries/clientpositive/index_auto_file_format.q deleted file mode 100644 index 2afafb8267..0000000000 --- ql/src/test/queries/clientpositive/index_auto_file_format.q +++ /dev/null @@ -1,23 +0,0 @@ -set hive.mapred.mode=nonstrict; -set hive.stats.dbclass=fs; - --- SORT_QUERY_RESULTS --- test automatic use of index on different file formats -CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX src_index ON src REBUILD; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=true; -SET hive.optimize.index.filter.compact.minsize=0; - -EXPLAIN SELECT key, value FROM src WHERE key=86; -SELECT key, value FROM src WHERE key=86; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat; -SET hive.optimize.index.filter=true; -SET hive.optimize.index.filter.compact.minsize=0; - -EXPLAIN SELECT key, value FROM src WHERE key=86; -SELECT key, value FROM src WHERE key=86; - -DROP INDEX src_index on src; diff --git ql/src/test/queries/clientpositive/index_auto_mult_tables.q ql/src/test/queries/clientpositive/index_auto_mult_tables.q deleted file mode 100644 index 924060b098..0000000000 --- ql/src/test/queries/clientpositive/index_auto_mult_tables.q +++ /dev/null @@ -1,25 +0,0 @@ -set hive.mapred.mode=nonstrict; --- SORT_QUERY_RESULTS --- try the query without indexing, with manual indexing, and with automatic indexing - --- without indexing -EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90; -SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90; - -set hive.stats.dbclass=fs; - -CREATE INDEX src_index_bitmap ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD; -ALTER INDEX src_index_bitmap ON src REBUILD; - -CREATE INDEX srcpart_index_bitmap ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD; -ALTER INDEX srcpart_index_bitmap ON srcpart REBUILD; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=true; -SET hive.optimize.index.filter.compact.minsize=0; - -EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90; -SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90; - -DROP INDEX src_index_bitmap on src; -DROP INDEX srcpart_index_bitmap on srcpart; diff --git ql/src/test/queries/clientpositive/index_auto_mult_tables_compact.q 
ql/src/test/queries/clientpositive/index_auto_mult_tables_compact.q deleted file mode 100644 index 20f34d1c2e..0000000000 --- ql/src/test/queries/clientpositive/index_auto_mult_tables_compact.q +++ /dev/null @@ -1,26 +0,0 @@ -set hive.mapred.mode=nonstrict; --- SORT_QUERY_RESULTS --- try the query without indexing, with manual indexing, and with automatic indexing - --- without indexing -EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90; -SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90; - -set hive.stats.dbclass=fs; - -CREATE INDEX src_index_compact ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX src_index_compact ON src REBUILD; - -CREATE INDEX srcpart_index_compact ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX srcpart_index_compact ON srcpart REBUILD; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=true; -SET hive.optimize.index.filter.compact.minsize=0; - --- automatic indexing -EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90; -SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90; - -DROP INDEX src_index_compact on src; -DROP INDEX srcpart_index_compact on srcpart; diff --git ql/src/test/queries/clientpositive/index_auto_multiple.q ql/src/test/queries/clientpositive/index_auto_multiple.q deleted file mode 100644 index 2bcb5a52c6..0000000000 --- ql/src/test/queries/clientpositive/index_auto_multiple.q +++ /dev/null @@ -1,20 +0,0 @@ -set hive.mapred.mode=nonstrict; -set hive.stats.dbclass=fs; - --- SORT_QUERY_RESULTS --- With multiple indexes, make sure we choose which to use in a consistent order - -CREATE INDEX src_key_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD; -CREATE INDEX src_val_index ON TABLE src(value) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX src_key_index ON src REBUILD; -ALTER INDEX src_val_index ON src REBUILD; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=true; -SET hive.optimize.index.filter.compact.minsize=0; - -EXPLAIN SELECT key, value FROM src WHERE key=86; -SELECT key, value FROM src WHERE key=86; - -DROP INDEX src_key_index ON src; -DROP INDEX src_val_index ON src; diff --git ql/src/test/queries/clientpositive/index_auto_partitioned.q ql/src/test/queries/clientpositive/index_auto_partitioned.q deleted file mode 100644 index e25fdb9b5c..0000000000 --- ql/src/test/queries/clientpositive/index_auto_partitioned.q +++ /dev/null @@ -1,17 +0,0 @@ -set hive.mapred.mode=nonstrict; -set hive.stats.dbclass=fs; -set hive.fetch.task.conversion=none; - --- SORT_QUERY_RESULTS --- test automatic use of index on table with partitions -CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX src_part_index ON srcpart REBUILD; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=true; -SET hive.optimize.index.filter.compact.minsize=0; - -EXPLAIN SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09'; -SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09'; - -DROP INDEX src_part_index ON srcpart; diff --git ql/src/test/queries/clientpositive/index_auto_self_join.q 
ql/src/test/queries/clientpositive/index_auto_self_join.q deleted file mode 100644 index 2ce6d1edc8..0000000000 --- ql/src/test/queries/clientpositive/index_auto_self_join.q +++ /dev/null @@ -1,19 +0,0 @@ -set hive.mapred.mode=nonstrict; --- SORT_QUERY_RESULTS --- try the query without indexing, with manual indexing, and with automatic indexing - -EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90; -SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90; - -set hive.stats.dbclass=fs; -CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD; -ALTER INDEX src_index ON src REBUILD; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=true; -SET hive.optimize.index.filter.compact.minsize=0; - -EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90; -SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90; - -DROP INDEX src_index on src; diff --git ql/src/test/queries/clientpositive/index_auto_unused.q ql/src/test/queries/clientpositive/index_auto_unused.q deleted file mode 100644 index 4e33366a5f..0000000000 --- ql/src/test/queries/clientpositive/index_auto_unused.q +++ /dev/null @@ -1,64 +0,0 @@ -set hive.mapred.mode=nonstrict; -set hive.stats.dbclass=fs; - --- SORT_QUERY_RESULTS --- test cases where the index should not be used automatically - -CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX src_index ON src REBUILD; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=true; -SET hive.optimize.index.filter.compact.minsize=5368709120; -SET hive.optimize.index.filter.compact.maxsize=-1; - --- min size too large (src is less than 5G) -EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100; -SELECT * FROM src WHERE key > 80 AND key < 100; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=true; -SET hive.optimize.index.filter.compact.minsize=0; -SET hive.optimize.index.filter.compact.maxsize=1; - --- max size too small -EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100; -SELECT * FROM src WHERE key > 80 AND key < 100; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=true; -SET hive.optimize.index.filter.compact.minsize=0; -SET hive.optimize.index.filter.compact.maxsize=-1; - --- OR predicate not supported by compact indexes -EXPLAIN SELECT * FROM src WHERE key < 10 OR key > 480; -SELECT * FROM src WHERE key < 10 OR key > 480; - - SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=true; -SET hive.optimize.index.filter.compact.minsize=0; -SET hive.optimize.index.filter.compact.maxsize=-1; - --- columns are not covered by indexes -DROP INDEX src_index on src; -CREATE INDEX src_val_index ON TABLE src(value) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX src_val_index ON src REBUILD; - -EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100; -SELECT * FROM src WHERE key > 80 AND key < 100; - -DROP INDEX src_val_index on src; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=true; -SET 
hive.optimize.index.filter.compact.minsize=0; -SET hive.optimize.index.filter.compact.maxsize=-1; - --- required partitions have not been built yet -CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX src_part_index ON srcpart PARTITION (ds='2008-04-08', hr=11) REBUILD; - -EXPLAIN SELECT * FROM srcpart WHERE ds='2008-04-09' AND hr=12 AND key < 10; -SELECT * FROM srcpart WHERE ds='2008-04-09' AND hr=12 AND key < 10; - -DROP INDEX src_part_index on srcpart; diff --git ql/src/test/queries/clientpositive/index_auto_update.q ql/src/test/queries/clientpositive/index_auto_update.q deleted file mode 100644 index b184080927..0000000000 --- ql/src/test/queries/clientpositive/index_auto_update.q +++ /dev/null @@ -1,29 +0,0 @@ -set hive.mapred.mode=nonstrict; --- Test if index is actually being used. - --- Create temp, and populate it with some values in src. -CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE; -INSERT OVERWRITE TABLE temp SELECT * FROM src WHERE key < 50; - --- Build an index on temp. -CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX temp_index ON temp REBUILD; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=true; -SET hive.optimize.index.autoupdate=true; -SET hive.optimize.index.filter.compact.minsize=0; - --- overwrite temp table so index is out of date -EXPLAIN INSERT OVERWRITE TABLE temp SELECT * FROM src; -INSERT OVERWRITE TABLE temp SELECT * FROM src; - --- query should return indexed values -EXPLAIN SELECT * FROM temp WHERE key = 86; -SELECT * FROM temp WHERE key = 86; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=false; -drop index temp_index on temp; -DROP table temp; - diff --git ql/src/test/queries/clientpositive/index_bitmap.q ql/src/test/queries/clientpositive/index_bitmap.q deleted file mode 100644 index 91a4e547b9..0000000000 --- ql/src/test/queries/clientpositive/index_bitmap.q +++ /dev/null @@ -1,52 +0,0 @@ -set hive.mapred.mode=nonstrict; -set hive.stats.dbclass=fs; - --- SORT_QUERY_RESULTS - -DROP INDEX srcpart_index_proj on srcpart; - -EXPLAIN -CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD; -CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD; -ALTER INDEX srcpart_index_proj ON srcpart REBUILD; -SELECT x.* FROM default__srcpart_srcpart_index_proj__ x WHERE x.ds = '2008-04-08' and x.hr = 11; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_test_index_result" SELECT `_bucketname`, -COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_srcpart_index_proj__ -x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' GROUP BY `_bucketname`; -SET hive.index.blockfilter.file=${system:test.tmp.dir}/index_test_index_result; -SET hive.input.format=org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat; -SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08'; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_test_index_result" SELECT `_bucketname` , -COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_srcpart_index_proj__ -x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' and x.hr = 11 GROUP BY `_bucketname`; -SET 
hive.index.blockfilter.file=${system:test.tmp.dir}/index_test_index_result; -SET hive.input.format=org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat; -SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11; - -DROP INDEX srcpart_index_proj on srcpart; - -EXPLAIN -CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD; -CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD; -ALTER INDEX srcpart_index_proj ON srcpart REBUILD; -SELECT x.* FROM default__srcpart_srcpart_index_proj__ x; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" SELECT `_bucketname` , -COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_srcpart_index_proj__ -WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND key=100 GROUP BY `_bucketname`; -SET hive.index.blockfilter.file=${system:test.tmp.dir}/index_result; -SET hive.input.format=org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat; -SELECT key, value FROM srcpart WHERE key=100; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SELECT key, value FROM srcpart WHERE key=100; - -DROP INDEX srcpart_index_proj on srcpart; diff --git ql/src/test/queries/clientpositive/index_bitmap1.q ql/src/test/queries/clientpositive/index_bitmap1.q deleted file mode 100644 index ff6ae5d2ea..0000000000 --- ql/src/test/queries/clientpositive/index_bitmap1.q +++ /dev/null @@ -1,22 +0,0 @@ -set hive.stats.dbclass=fs; - --- SORT_QUERY_RESULTS - -EXPLAIN -CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD; -CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD; -ALTER INDEX src_index ON src REBUILD; -SELECT x.* FROM default__src_src_index__ x; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" SELECT `_bucketname`, -COLLECT_SET(`_offset`) as `_offsets` FROM default__src_src_index__ WHERE NOT -EWAH_BITMAP_EMPTY(`_bitmaps`) AND key=100 GROUP BY `_bucketname`; -SET hive.index.blockfilter.file=${system:test.tmp.dir}/index_result; -SET hive.input.format=org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat; -SELECT key, value FROM src WHERE key=100; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SELECT key, value FROM src WHERE key=100; - -DROP INDEX src_index ON src; diff --git ql/src/test/queries/clientpositive/index_bitmap2.q ql/src/test/queries/clientpositive/index_bitmap2.q deleted file mode 100644 index 89fbe764e9..0000000000 --- ql/src/test/queries/clientpositive/index_bitmap2.q +++ /dev/null @@ -1,39 +0,0 @@ -set hive.stats.dbclass=fs; - --- SORT_QUERY_RESULTS - -EXPLAIN -CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD; -EXPLAIN -CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD; - -CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD; -CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD; -ALTER INDEX src1_index ON src REBUILD; -ALTER INDEX src2_index ON src REBUILD; -SELECT * FROM default__src_src1_index__; -SELECT * FROM default__src_src2_index__; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; - -INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" 
-SELECT t.bucketname as `_bucketname`, COLLECT_SET(t.offset) AS `_offsets` FROM - (SELECT `_bucketname` AS bucketname, `_offset` AS offset - FROM default__src_src1_index__ - WHERE key = 0 AND NOT EWAH_BITMAP_EMPTY(`_bitmaps`) UNION ALL - SELECT `_bucketname` AS bucketname, `_offset` AS offset - FROM default__src_src2_index__ - WHERE value = "val2" AND NOT EWAH_BITMAP_EMPTY(`_bitmaps`)) t -GROUP BY t.bucketname; - -SET hive.index.blockfilter.file=${system:test.tmp.dir}/index_result; -SET hive.input.format=org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat; - -SELECT key, value FROM src WHERE key=0 OR value = "val_2"; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SELECT key, value FROM src WHERE key=0 OR value = "val_2"; - -DROP INDEX src1_index ON src; -DROP INDEX src2_index ON src; - diff --git ql/src/test/queries/clientpositive/index_bitmap3.q ql/src/test/queries/clientpositive/index_bitmap3.q deleted file mode 100644 index 73bdc89955..0000000000 --- ql/src/test/queries/clientpositive/index_bitmap3.q +++ /dev/null @@ -1,52 +0,0 @@ -set hive.mapred.mode=nonstrict; -set hive.stats.autogather=true; - --- SORT_QUERY_RESULTS - -EXPLAIN -CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD; -EXPLAIN -CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD; - -CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD; -CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD; -ALTER INDEX src1_index ON src REBUILD; -ALTER INDEX src2_index ON src REBUILD; -SELECT * FROM default__src_src1_index__; -SELECT * FROM default__src_src2_index__; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; - -EXPLAIN -SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets` -FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__ - WHERE key = 0) a - JOIN - (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__ - WHERE value = "val_0") b - ON - a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT -EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname; - -INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" -SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets` -FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__ - WHERE key = 0) a - JOIN - (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__ - WHERE value = "val_0") b - ON - a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT -EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname; - -SET hive.index.blockfilter.file=${system:test.tmp.dir}/index_result; -SET hive.input.format=org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat; - -SELECT key, value FROM src WHERE key=0 AND value = "val_0"; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SELECT key, value FROM src WHERE key=0 AND value = "val_0"; - -DROP INDEX src1_index ON src; -DROP INDEX src2_index ON src; - diff --git ql/src/test/queries/clientpositive/index_bitmap_auto.q ql/src/test/queries/clientpositive/index_bitmap_auto.q deleted file mode 100644 index 80209009ba..0000000000 --- ql/src/test/queries/clientpositive/index_bitmap_auto.q +++ /dev/null @@ -1,57 +0,0 @@ -set hive.mapred.mode=nonstrict; -set 
hive.stats.autogather=true; - --- SORT_QUERY_RESULTS - --- try the query without indexing, with manual indexing, and with automatic indexing --- without indexing -SELECT key, value FROM src WHERE key=0 AND value = "val_0"; - --- create indices -EXPLAIN -CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD; -EXPLAIN -CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD; -CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD; -ALTER INDEX src1_index ON src REBUILD; -ALTER INDEX src2_index ON src REBUILD; -SELECT * FROM default__src_src1_index__; -SELECT * FROM default__src_src2_index__; - - --- manual indexing -EXPLAIN -SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets` -FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__ - WHERE key = 0) a - JOIN - (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__ - WHERE value = "val_0") b - ON - a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT -EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname; - -INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" -SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets` -FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__ - WHERE key = 0) a - JOIN - (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__ - WHERE value = "val_0") b - ON - a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT -EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname; - -SELECT key, value FROM src WHERE key=0 AND value = "val_0"; - - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=true; -SELECT key, value FROM src WHERE key=0 AND value = "val_0"; - -DROP INDEX src1_index ON src; -DROP INDEX src2_index ON src; - diff --git ql/src/test/queries/clientpositive/index_bitmap_auto_partitioned.q ql/src/test/queries/clientpositive/index_bitmap_auto_partitioned.q deleted file mode 100644 index c6c558b3df..0000000000 --- ql/src/test/queries/clientpositive/index_bitmap_auto_partitioned.q +++ /dev/null @@ -1,17 +0,0 @@ -set hive.mapred.mode=nonstrict; -set hive.stats.dbclass=fs; -set hive.fetch.task.conversion=none; - --- SORT_QUERY_RESULTS - --- test automatic use of index on table with partitions -CREATE INDEX src_part_index ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD; -ALTER INDEX src_part_index ON srcpart REBUILD; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=true; - -EXPLAIN SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09'; -SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09'; - -DROP INDEX src_part_index ON srcpart; diff --git ql/src/test/queries/clientpositive/index_bitmap_compression.q ql/src/test/queries/clientpositive/index_bitmap_compression.q deleted file mode 100644 index 9b0bbe8b00..0000000000 --- ql/src/test/queries/clientpositive/index_bitmap_compression.q +++ /dev/null @@ -1,18 +0,0 @@ -set hive.mapred.mode=nonstrict; -set hive.stats.dbclass=fs; -SET hive.exec.compress.output=true; - --- SORT_QUERY_RESULTS - 
-CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD; -ALTER INDEX src_index ON src REBUILD; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=true; -SET hive.optimize.index.filter.compact.minsize=0; - --- automatic indexing -EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100; -SELECT key, value FROM src WHERE key > 80 AND key < 100; - -DROP INDEX src_index on src; diff --git ql/src/test/queries/clientpositive/index_bitmap_rc.q ql/src/test/queries/clientpositive/index_bitmap_rc.q deleted file mode 100644 index b8a4f12c97..0000000000 --- ql/src/test/queries/clientpositive/index_bitmap_rc.q +++ /dev/null @@ -1,58 +0,0 @@ -set hive.mapred.mode=nonstrict; -set hive.stats.dbclass=fs; - --- SORT_QUERY_RESULTS - -CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE; - -INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 11; -INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 12; -INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 11; -INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 12; - -EXPLAIN -CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD; -CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD; -ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD; -SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.ds = '2008-04-08' and x.hr = 11; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_test_index_result" SELECT `_bucketname`, -COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_rc_srcpart_rc_index__ -x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' GROUP BY `_bucketname`; -SET hive.index.blockfilter.file=${system:test.tmp.dir}/index_test_index_result; -SET hive.input.format=org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat; -SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08'; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_test_index_result" SELECT `_bucketname` , -COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_rc_srcpart_rc_index__ -x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' and x.hr = 11 GROUP BY `_bucketname`; -SET hive.index.blockfilter.file=${system:test.tmp.dir}/index_test_index_result; -SET hive.input.format=org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat; -SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11; - -DROP INDEX srcpart_rc_index on srcpart_rc; - -EXPLAIN -CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD; -CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD; -ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD; -SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.key = 100; - -SET 
hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" SELECT `_bucketname` , -COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_rc_srcpart_rc_index__ -WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND key=100 GROUP BY `_bucketname`; -SET hive.index.blockfilter.file=${system:test.tmp.dir}/index_result; -SET hive.input.format=org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat; -SELECT key, value FROM srcpart_rc WHERE key=100; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SELECT key, value FROM srcpart_rc WHERE key=100; - -DROP INDEX srcpart_rc_index on srcpart_rc; -DROP TABLE srcpart_rc; diff --git ql/src/test/queries/clientpositive/index_compact.q ql/src/test/queries/clientpositive/index_compact.q deleted file mode 100644 index 6add673063..0000000000 --- ql/src/test/queries/clientpositive/index_compact.q +++ /dev/null @@ -1,46 +0,0 @@ -set hive.mapred.mode=nonstrict; -set hive.stats.dbclass=fs; - --- SORT_QUERY_RESULTS - -DROP INDEX srcpart_index_proj on srcpart; - -EXPLAIN -CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD; -CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX srcpart_index_proj ON srcpart REBUILD; -SELECT x.* FROM default__srcpart_srcpart_index_proj__ x WHERE x.ds = '2008-04-08' and x.hr = 11; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_test_index_result" SELECT `_bucketname` , `_offsets` FROM default__srcpart_srcpart_index_proj__ x WHERE x.key=100 AND x.ds = '2008-04-08'; -SET hive.index.compact.file=${system:test.tmp.dir}/index_test_index_result; -SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat; -SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08'; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_test_index_result" SELECT `_bucketname` , `_offsets` FROM default__srcpart_srcpart_index_proj__ x WHERE x.key=100 AND x.ds = '2008-04-08' and x.hr = 11; -SET hive.index.compact.file=${system:test.tmp.dir}/index_test_index_result; -SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat; -SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11; - -DROP INDEX srcpart_index_proj on srcpart; - -EXPLAIN -CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD; -CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX srcpart_index_proj ON srcpart REBUILD; -SELECT x.* FROM default__srcpart_srcpart_index_proj__ x; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" SELECT `_bucketname` , `_offsets` FROM default__srcpart_srcpart_index_proj__ WHERE key=100; -SET hive.index.compact.file=${system:test.tmp.dir}/index_result; -SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat; -SELECT key, value FROM srcpart WHERE key=100; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SELECT key, value FROM srcpart WHERE key=100; - -DROP INDEX srcpart_index_proj on 
srcpart; diff --git ql/src/test/queries/clientpositive/index_compact_1.q ql/src/test/queries/clientpositive/index_compact_1.q deleted file mode 100644 index 9cdd563ec7..0000000000 --- ql/src/test/queries/clientpositive/index_compact_1.q +++ /dev/null @@ -1,20 +0,0 @@ -set hive.stats.dbclass=fs; - --- SORT_QUERY_RESULTS - -EXPLAIN -CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD; -CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX src_index ON src REBUILD; -SELECT x.* FROM default__src_src_index__ x; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" SELECT `_bucketname` , `_offsets` FROM default__src_src_index__ WHERE key=100; -SET hive.index.compact.file=${system:test.tmp.dir}/index_result; -SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat; -SELECT key, value FROM src WHERE key=100; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SELECT key, value FROM src WHERE key=100; - -DROP INDEX src_index on src; diff --git ql/src/test/queries/clientpositive/index_compact_2.q ql/src/test/queries/clientpositive/index_compact_2.q deleted file mode 100644 index 7b2fce21a6..0000000000 --- ql/src/test/queries/clientpositive/index_compact_2.q +++ /dev/null @@ -1,50 +0,0 @@ -set hive.mapred.mode=nonstrict; -set hive.stats.dbclass=fs; - --- SORT_QUERY_RESULTS - -CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE; - -INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 11; -INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 12; -INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 11; -INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 12; - -CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD; -SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.ds = '2008-04-08' and x.hr = 11; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_test_index_result" SELECT `_bucketname` , `_offsets` FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.key=100 AND x.ds = '2008-04-08'; -SET hive.index.compact.file=${system:test.tmp.dir}/index_test_index_result; -SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat; -SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08'; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_test_index_result" SELECT `_bucketname` , `_offsets` FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.key=100 AND x.ds = '2008-04-08' and x.hr = 11; -SET hive.index.compact.file=${system:test.tmp.dir}/index_test_index_result; -SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat; -SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SELECT key, value FROM srcpart_rc WHERE key=100 AND 
ds = '2008-04-08' and hr = 11; - -DROP INDEX srcpart_rc_index on srcpart_rc; - -EXPLAIN -CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD; -CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD; -SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" SELECT `_bucketname` , `_offsets` FROM default__srcpart_rc_srcpart_rc_index__ WHERE key=100; -SET hive.index.compact.file=${system:test.tmp.dir}/index_result; -SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat; -SELECT key, value FROM srcpart_rc WHERE key=100; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SELECT key, value FROM srcpart_rc WHERE key=100; - -DROP INDEX srcpart_rc_index on srcpart_rc; -DROP TABLE srcpart_rc; diff --git ql/src/test/queries/clientpositive/index_compact_3.q ql/src/test/queries/clientpositive/index_compact_3.q deleted file mode 100644 index 15ba946b82..0000000000 --- ql/src/test/queries/clientpositive/index_compact_3.q +++ /dev/null @@ -1,23 +0,0 @@ -set hive.stats.dbclass=fs; - --- SORT_QUERY_RESULTS - -CREATE TABLE src_index_test_rc (key int, value string) STORED AS RCFILE; - -INSERT OVERWRITE TABLE src_index_test_rc SELECT * FROM src; - -CREATE INDEX src_index ON TABLE src_index_test_rc(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX src_index ON src_index_test_rc REBUILD; -SELECT x.* FROM default__src_index_test_rc_src_index__ x; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" SELECT `_bucketname` , `_offsets` FROM default__src_index_test_rc_src_index__ WHERE key=100; -SET hive.index.compact.file=${system:test.tmp.dir}/index_result; -SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat; -SELECT key, value FROM src_index_test_rc WHERE key=100; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SELECT key, value FROM src_index_test_rc WHERE key=100; - -DROP INDEX src_index on src_index_test_rc; -DROP TABLE src_index_test_rc; diff --git ql/src/test/queries/clientpositive/index_compact_binary_search.q ql/src/test/queries/clientpositive/index_compact_binary_search.q deleted file mode 100644 index e72b27c781..0000000000 --- ql/src/test/queries/clientpositive/index_compact_binary_search.q +++ /dev/null @@ -1,132 +0,0 @@ -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.default.fileformat=TextFile; -set hive.stats.dbclass=fs; -CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX src_index ON src REBUILD; - -SET hive.optimize.index.filter=true; -SET hive.optimize.index.filter.compact.minsize=1; -SET hive.index.compact.binary.search=true; - -SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHiveSortedInputFormatUsedHook; - -SELECT * FROM src WHERE key = '0'; - -SELECT * FROM src WHERE key < '1'; - -SELECT * FROM src WHERE key <= '0'; - -SELECT * FROM src WHERE key > '8'; - -SELECT * FROM src WHERE key >= '9'; - -SET hive.exec.post.hooks=; - -DROP INDEX src_index ON src; - -SET hive.default.fileformat=RCFILE; - -CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX src_index ON src REBUILD; - -SET 
hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHiveSortedInputFormatUsedHook; - -SELECT * FROM src WHERE key = '0'; - -SELECT * FROM src WHERE key < '1'; - -SELECT * FROM src WHERE key <= '0'; - -SELECT * FROM src WHERE key > '8'; - -SELECT * FROM src WHERE key >= '9'; - -SET hive.exec.post.hooks=; - -DROP INDEX src_index ON src; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat; -SET hive.default.fileformat=TextFile; - -CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX src_index ON src REBUILD; - -SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHiveSortedInputFormatUsedHook; - -SELECT * FROM src WHERE key = '0'; - -SELECT * FROM src WHERE key < '1'; - -SELECT * FROM src WHERE key <= '0'; - -SELECT * FROM src WHERE key > '8'; - -SELECT * FROM src WHERE key >= '9'; - -SET hive.exec.post.hooks=; - -DROP INDEX src_index ON src; - -SET hive.default.fileformat=RCFILE; - -CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX src_index ON src REBUILD; - -SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHiveSortedInputFormatUsedHook; - -SELECT * FROM src WHERE key = '0'; - -SELECT * FROM src WHERE key < '1'; - -SELECT * FROM src WHERE key <= '0'; - -SELECT * FROM src WHERE key > '8'; - -SELECT * FROM src WHERE key >= '9'; - -SET hive.exec.post.hooks=; - -DROP INDEX src_index ON src; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; -SET hive.default.fileformat=TextFile; - -CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX src_index ON src REBUILD; - -SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHiveSortedInputFormatUsedHook; - -SELECT * FROM src WHERE key = '0'; - -SELECT * FROM src WHERE key < '1'; - -SELECT * FROM src WHERE key <= '0'; - -SELECT * FROM src WHERE key > '8'; - -SELECT * FROM src WHERE key >= '9'; - -SET hive.exec.post.hooks=; - -DROP INDEX src_index ON src; - -SET hive.default.fileformat=RCFILE; - -CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX src_index ON src REBUILD; - -SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHiveSortedInputFormatUsedHook; - -SELECT * FROM src WHERE key = '0'; - -SELECT * FROM src WHERE key < '1'; - -SELECT * FROM src WHERE key <= '0'; - -SELECT * FROM src WHERE key > '8'; - -SELECT * FROM src WHERE key >= '9'; - -SET hive.exec.post.hooks=; - -DROP INDEX src_index ON src; diff --git ql/src/test/queries/clientpositive/index_compression.q ql/src/test/queries/clientpositive/index_compression.q deleted file mode 100644 index be935605db..0000000000 --- ql/src/test/queries/clientpositive/index_compression.q +++ /dev/null @@ -1,18 +0,0 @@ -set hive.mapred.mode=nonstrict; -SET hive.exec.compress.output=true; -SET hive.stats.dbclass=fs; - --- SORT_QUERY_RESULTS - -CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX src_index ON src REBUILD; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=true; -SET hive.optimize.index.filter.compact.minsize=0; - --- automatic indexing -EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100; -SELECT key, value FROM src WHERE key > 80 AND key < 100; - -DROP INDEX src_index on src; diff --git ql/src/test/queries/clientpositive/index_creation.q ql/src/test/queries/clientpositive/index_creation.q deleted file mode 100644 index 
ef020b63d5..0000000000 --- ql/src/test/queries/clientpositive/index_creation.q +++ /dev/null @@ -1,54 +0,0 @@ -set hive.stats.dbclass=fs; -drop index src_index_2 on src; -drop index src_index_3 on src; -drop index src_index_4 on src; -drop index src_index_5 on src; -drop index src_index_6 on src; -drop index src_index_7 on src; -drop index src_index_8 on src; -drop index src_index_9 on src; -drop table `_t`; - -create index src_index_2 on table src(key) as 'compact' WITH DEFERRED REBUILD; -desc extended default__src_src_index_2__; - -create index src_index_3 on table src(key) as 'compact' WITH DEFERRED REBUILD in table src_idx_src_index_3; -desc extended src_idx_src_index_3; - -create index src_index_4 on table src(key) as 'compact' WITH DEFERRED REBUILD ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE; -desc extended default__src_src_index_4__; - -create index src_index_5 on table src(key) as 'compact' WITH DEFERRED REBUILD ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' ESCAPED BY '\\'; -desc extended default__src_src_index_5__; - -create index src_index_6 on table src(key) as 'compact' WITH DEFERRED REBUILD STORED AS RCFILE; -desc extended default__src_src_index_6__; - -create index src_index_7 on table src(key) as 'compact' WITH DEFERRED REBUILD in table src_idx_src_index_7 STORED AS RCFILE; -desc extended src_idx_src_index_7; - -create index src_index_8 on table src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2"); -desc extended default__src_src_index_8__; - -create index src_index_9 on table src(key) as 'compact' WITH DEFERRED REBUILD TBLPROPERTIES ("prop1"="val1", "prop2"="val2"); -desc extended default__src_src_index_9__; - -create table `_t`(`_i` int, `_j` int); -create index x on table `_t`(`_j`) as 'compact' WITH DEFERRED REBUILD; -alter index x on `_t` rebuild; - -create index x2 on table `_t`(`_i`,`_j`) as 'compact' WITH DEFERRED -REBUILD; -alter index x2 on `_t` rebuild; - -drop index src_index_2 on src; -drop index src_index_3 on src; -drop index src_index_4 on src; -drop index src_index_5 on src; -drop index src_index_6 on src; -drop index src_index_7 on src; -drop index src_index_8 on src; -drop index src_index_9 on src; -drop table `_t`; - -show tables; diff --git ql/src/test/queries/clientpositive/index_in_db.q ql/src/test/queries/clientpositive/index_in_db.q deleted file mode 100644 index 1c347817b6..0000000000 --- ql/src/test/queries/clientpositive/index_in_db.q +++ /dev/null @@ -1,16 +0,0 @@ -set hive.optimize.index.filter=true; -drop database if exists index_test_db cascade; --- Test selecting selecting from a table that is backed by an index --- create table, index in a db, then set default db as current db, and try selecting - -create database index_test_db; - -use index_test_db; -create table testtb (id int, name string); -create index id_index on table testtb (id) as 'COMPACT' WITH DEFERRED REBUILD in table testdb_id_idx_tb; - -use default; -select * from index_test_db.testtb where id>2; - -use index_test_db; -drop index id_index on testtb; diff --git ql/src/test/queries/clientpositive/index_serde.q ql/src/test/queries/clientpositive/index_serde.q deleted file mode 100644 index 8f20f28b42..0000000000 --- ql/src/test/queries/clientpositive/index_serde.q +++ /dev/null @@ -1,52 +0,0 @@ -set hive.stats.dbclass=fs; - --- SORT_QUERY_RESULTS --- Want to ensure we can build and use indices on tables stored with SerDes --- Build the (Avro backed) table -CREATE TABLE doctors -ROW FORMAT -SERDE 
'org.apache.hadoop.hive.serde2.avro.AvroSerDe' -STORED AS -INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' -OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' -TBLPROPERTIES ('avro.schema.literal'='{ - "namespace": "testing.hive.avro.serde", - "name": "doctors", - "type": "record", - "fields": [ - { - "name":"number", - "type":"int", - "doc":"Order of playing the role" - }, - { - "name":"first_name", - "type":"string", - "doc":"first name of actor playing role" - }, - { - "name":"last_name", - "type":"string", - "doc":"last name of actor playing role" - } - ] -}'); - -DESCRIBE doctors; - -LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors; - --- Create and build an index -CREATE INDEX doctors_index ON TABLE doctors(number) AS 'COMPACT' WITH DEFERRED REBUILD; -DESCRIBE EXTENDED default__doctors_doctors_index__; -ALTER INDEX doctors_index ON doctors REBUILD; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=true; -SET hive.optimize.index.filter.compact.minsize=0; - -EXPLAIN SELECT * FROM doctors WHERE number > 6; -SELECT * FROM doctors WHERE number > 6; - -DROP INDEX doctors_index ON doctors; -DROP TABLE doctors; diff --git ql/src/test/queries/clientpositive/index_skewtable.q ql/src/test/queries/clientpositive/index_skewtable.q deleted file mode 100644 index e85e646e70..0000000000 --- ql/src/test/queries/clientpositive/index_skewtable.q +++ /dev/null @@ -1,23 +0,0 @@ -set hive.mapred.mode=nonstrict; --- Test creating an index on skewed table - --- Create a skew table -CREATE TABLE kv(key STRING, value STRING) SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE; - -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE kv; - --- Create and build an index -CREATE INDEX kv_index ON TABLE kv(value) AS 'COMPACT' WITH DEFERRED REBUILD; -DESCRIBE FORMATTED default__kv_kv_index__; -ALTER INDEX kv_index ON kv REBUILD; - -SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -SET hive.optimize.index.filter=true; -SET hive.optimize.index.filter.compact.minsize=0; - --- Run a query that uses the index -EXPLAIN SELECT * FROM kv WHERE value > '15' ORDER BY value; -SELECT * FROM kv WHERE value > '15' ORDER BY value; - -DROP INDEX kv_index ON kv; -DROP TABLE kv; diff --git ql/src/test/queries/clientpositive/index_stale.q ql/src/test/queries/clientpositive/index_stale.q deleted file mode 100644 index 6daba83cf8..0000000000 --- ql/src/test/queries/clientpositive/index_stale.q +++ /dev/null @@ -1,23 +0,0 @@ -set hive.mapred.mode=nonstrict; -set hive.stats.dbclass=fs; --- test that stale indexes are not used - -CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE; -INSERT OVERWRITE TABLE temp SELECT * FROM src WHERE key < 50; - --- Build an index on temp. 
-CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX temp_index ON temp REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
--- overwrite temp table so index is out of date
-INSERT OVERWRITE TABLE temp SELECT * FROM src;
-
--- should return correct results bypassing index
-EXPLAIN SELECT * FROM temp WHERE key = 86;
-SELECT * FROM temp WHERE key = 86;
-DROP index temp_index on temp;
-DROP table temp;
diff --git ql/src/test/queries/clientpositive/index_stale_partitioned.q ql/src/test/queries/clientpositive/index_stale_partitioned.q
deleted file mode 100644
index 630b415161..0000000000
--- ql/src/test/queries/clientpositive/index_stale_partitioned.q
+++ /dev/null
@@ -1,29 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.stats.dbclass=fs;
--- Test if index is actually being used.
-
--- Create temp, and populate it with some values in src.
-CREATE TABLE temp(key STRING, val STRING) PARTITIONED BY (foo string) STORED AS TEXTFILE;
-ALTER TABLE temp ADD PARTITION (foo = 'bar');
-INSERT OVERWRITE TABLE temp PARTITION (foo = 'bar') SELECT * FROM src WHERE key < 50;
-
--- Build an index on temp.
-CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX temp_index ON temp PARTITION (foo = 'bar') REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
--- overwrite temp table so index is out of date
-INSERT OVERWRITE TABLE temp PARTITION (foo = 'bar') SELECT * FROM src;
-
--- query should not return any values
-SELECT * FROM default__temp_temp_index__ WHERE key = 86 AND foo='bar';
-EXPLAIN SELECT * FROM temp WHERE key = 86 AND foo = 'bar';
-SELECT * FROM temp WHERE key = 86 AND foo = 'bar';
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=false;
-DROP index temp_index on temp;
-DROP table temp;
diff --git ql/src/test/queries/clientpositive/orc_merge8.q ql/src/test/queries/clientpositive/orc_merge8.q
index 30a892bb2c..8ed4f7e2dc 100644
--- ql/src/test/queries/clientpositive/orc_merge8.q
+++ ql/src/test/queries/clientpositive/orc_merge8.q
@@ -25,7 +25,6 @@ alter table alltypes_orc set fileformat orc;
 load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes;
 SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
 set hive.merge.orcfile.stripe.level=false;
 set hive.merge.tezfiles=false;
 set hive.merge.mapfiles=false;
diff --git ql/src/test/queries/clientpositive/special_character_in_tabnames_2.q ql/src/test/queries/clientpositive/special_character_in_tabnames_2.q
index d7010e9d36..abe8f4bd06 100644
--- ql/src/test/queries/clientpositive/special_character_in_tabnames_2.q
+++ ql/src/test/queries/clientpositive/special_character_in_tabnames_2.q
@@ -24,7 +24,6 @@ SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 -- manual indexing
 INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_where" SELECT `_bucketname` , `_offsets` FROM `default__s/c_src_index__` WHERE key > 80 AND key < 100;
-SET hive.index.compact.file=${system:test.tmp.dir}/index_where;
 SET hive.optimize.index.filter=false;
 SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
diff --git ql/src/test/results/clientnegative/index_bitmap_no_map_aggr.q.out ql/src/test/results/clientnegative/index_bitmap_no_map_aggr.q.out
deleted file mode 100644
index b29d3a6698..0000000000
--- ql/src/test/results/clientnegative/index_bitmap_no_map_aggr.q.out
+++ /dev/null
@@ -1,20 +0,0 @@
-PREHOOK: query: EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-
-PREHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src1_index__
-FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: org.apache.hadoop.hive.ql.metadata.HiveException: Cannot construct index without map-side aggregation
diff --git ql/src/test/results/clientnegative/index_compact_entry_limit.q.out ql/src/test/results/clientnegative/index_compact_entry_limit.q.out
deleted file mode 100644
index f844ee49c0..0000000000
--- ql/src/test/results/clientnegative/index_compact_entry_limit.q.out
+++ /dev/null
@@ -1,37 +0,0 @@
-PREHOOK: query: drop index src_index on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-PREHOOK: query: ALTER INDEX src_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index__
-POSTHOOK: query: ALTER INDEX src_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-#### A masked pattern was here ####
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-#### A masked pattern was here ####
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src_index__
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM src WHERE key=100 ORDER BY key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-Job Submission failed with exception 'java.io.IOException(org.apache.hadoop.hive.ql.metadata.HiveException: Number of compact index entries loaded during the query exceeded the maximum of 5 set in hive.index.compact.query.max.entries)'
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.mr.MapRedTask. org.apache.hadoop.hive.ql.metadata.HiveException: Number of compact index entries loaded during the query exceeded the maximum of 5 set in hive.index.compact.query.max.entries
diff --git ql/src/test/results/clientnegative/index_compact_size_limit.q.out ql/src/test/results/clientnegative/index_compact_size_limit.q.out
deleted file mode 100644
index 9ff8f8fcd1..0000000000
--- ql/src/test/results/clientnegative/index_compact_size_limit.q.out
+++ /dev/null
@@ -1,37 +0,0 @@
-PREHOOK: query: drop index src_index on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-PREHOOK: query: ALTER INDEX src_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index__
-POSTHOOK: query: ALTER INDEX src_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-#### A masked pattern was here ####
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-#### A masked pattern was here ####
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src_index__
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM src WHERE key=100 ORDER BY key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-Job Submission failed with exception 'java.io.IOException(Size of data to read during a compact-index-based query exceeded the maximum of 1024 set in hive.index.compact.query.max.size)'
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.mr.MapRedTask. 
Size of data to read during a compact-index-based query exceeded the maximum of 1024 set in hive.index.compact.query.max.size diff --git ql/src/test/results/clientpositive/index_auth.q.out ql/src/test/results/clientpositive/index_auth.q.out deleted file mode 100644 index 385b639f92..0000000000 --- ql/src/test/results/clientpositive/index_auth.q.out +++ /dev/null @@ -1,79 +0,0 @@ -PREHOOK: query: create table foobar(key int, value string) PARTITIONED BY (ds string, hr string) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@foobar -POSTHOOK: query: create table foobar(key int, value string) PARTITIONED BY (ds string, hr string) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@foobar -PREHOOK: query: alter table foobar add partition (ds='2008-04-08',hr='12') -PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@foobar -POSTHOOK: query: alter table foobar add partition (ds='2008-04-08',hr='12') -POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@foobar -POSTHOOK: Output: default@foobar@ds=2008-04-08/hr=12 -PREHOOK: query: CREATE INDEX srcpart_AUTH_index ON TABLE foobar(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@foobar -POSTHOOK: query: CREATE INDEX srcpart_AUTH_index ON TABLE foobar(key) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@foobar -POSTHOOK: Output: default@default__foobar_srcpart_auth_index__ -PREHOOK: query: SHOW INDEXES ON foobar -PREHOOK: type: SHOWINDEXES -POSTHOOK: query: SHOW INDEXES ON foobar -POSTHOOK: type: SHOWINDEXES -srcpart_auth_index foobar key default__foobar_srcpart_auth_index__ bitmap -PREHOOK: query: grant select on table foobar to user hive_test_user -PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@foobar -POSTHOOK: query: grant select on table foobar to user hive_test_user -POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@foobar -PREHOOK: query: grant select on table default__foobar_srcpart_auth_indeX__ to user hive_test_user -PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@default__foobar_srcpart_auth_index__ -POSTHOOK: query: grant select on table default__foobar_srcpart_auth_indeX__ to user hive_test_user -POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@default__foobar_srcpart_auth_index__ -PREHOOK: query: grant update on table default__foobar_srcpart_auth_indEx__ to user hive_test_user -PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@default__foobar_srcpart_auth_index__ -POSTHOOK: query: grant update on table default__foobar_srcpart_auth_indEx__ to user hive_test_user -POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@default__foobar_srcpart_auth_index__ -PREHOOK: query: grant create on table default__foobar_srcpart_auth_inDex__ to user hive_test_user -PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@default__foobar_srcpart_auth_index__ -POSTHOOK: query: grant create on table default__foobar_srcpart_auth_inDex__ to user hive_test_user -POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@default__foobar_srcpart_auth_index__ -PREHOOK: query: ALTER INDEX srcpart_auth_INDEX ON foobar PARTITION (ds='2008-04-08',hr='12') REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@foobar -PREHOOK: Input: default@foobar@ds=2008-04-08/hr=12 -PREHOOK: Output: default@default__foobar_srcpart_auth_index__@ds=2008-04-08/hr=12 -POSTHOOK: query: ALTER INDEX srcpart_auth_INDEX ON foobar PARTITION 
(ds='2008-04-08',hr='12') REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@foobar -POSTHOOK: Input: default@foobar@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@default__foobar_srcpart_auth_index__@ds=2008-04-08/hr=12 -POSTHOOK: Lineage: default__foobar_srcpart_auth_index__ PARTITION(ds=2008-04-08,hr=12)._bitmaps EXPRESSION [(foobar)foobar.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__foobar_srcpart_auth_index__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(foobar)foobar.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__foobar_srcpart_auth_index__ PARTITION(ds=2008-04-08,hr=12)._offset SIMPLE [(foobar)foobar.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__foobar_srcpart_auth_index__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(foobar)foobar.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: DROP INDEX srcpart_auth_index on foobar -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@foobar -POSTHOOK: query: DROP INDEX srcpart_auth_index on foobar -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@foobar -PREHOOK: query: DROP TABLE foobar -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@foobar -PREHOOK: Output: default@foobar -POSTHOOK: query: DROP TABLE foobar -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@foobar -POSTHOOK: Output: default@foobar diff --git ql/src/test/results/clientpositive/index_auto.q.out ql/src/test/results/clientpositive/index_auto.q.out deleted file mode 100644 index 654e419f8b..0000000000 --- ql/src/test/results/clientpositive/index_auto.q.out +++ /dev/null @@ -1,255 +0,0 @@ -PREHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -82 val_82 -83 val_83 -83 val_83 -84 val_84 -84 val_84 -85 val_85 -86 val_86 -87 val_87 -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 -98 val_98 -PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index__ -PREHOOK: query: ALTER INDEX src_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src_index__ -POSTHOOK: query: ALTER INDEX src_index ON src REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index__ -POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__src_src_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -#### A masked pattern was here #### -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -#### A masked pattern was here #### -POSTHOOK: type: QUERY -POSTHOOK: Input: 
default@default__src_src_index__ -#### A masked pattern was here #### -PREHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -82 val_82 -83 val_83 -83 val_83 -84 val_84 -84 val_84 -85 val_85 -86 val_86 -87 val_87 -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 -98 val_98 -PREHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-3 is a root stage - Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6 - Stage-5 - Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 - Stage-1 depends on stages: Stage-2 - Stage-4 - Stage-6 - Stage-7 depends on stages: Stage-6 - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - alias: default__src_src_index__ - filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean) - Filter Operator - predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Select Operator - expressions: _bucketname (type: string), _offsets (type: array) - outputColumnNames: _col0, _col1 - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-8 - Conditional Operator - - Stage: Stage-5 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-2 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-4 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-6 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-7 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src_index__ -POSTHOOK: Input: default@src -#### A masked pattern was here #### -82 val_82 -83 val_83 -83 val_83 -84 val_84 -84 val_84 -85 val_85 -86 val_86 -87 val_87 -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 -98 val_98 -PREHOOK: query: DROP INDEX src_index on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: DROP INDEX src_index on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src diff --git ql/src/test/results/clientpositive/index_auto_empty.q.out ql/src/test/results/clientpositive/index_auto_empty.q.out deleted file mode 100644 index 0191339bb6..0000000000 --- ql/src/test/results/clientpositive/index_auto_empty.q.out +++ /dev/null @@ -1,101 +0,0 @@ -PREHOOK: query: CREATE DATABASE it -PREHOOK: type: CREATEDATABASE -PREHOOK: Output: database:it -POSTHOOK: query: CREATE DATABASE it -POSTHOOK: type: CREATEDATABASE -POSTHOOK: Output: database:it -PREHOOK: query: CREATE TABLE it.temp(key STRING, val STRING) STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:it -PREHOOK: Output: it@temp -POSTHOOK: query: CREATE TABLE it.temp(key STRING, val STRING) STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:it -POSTHOOK: Output: it@temp -PREHOOK: query: CREATE INDEX temp_index ON TABLE it.temp(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: it@temp -POSTHOOK: query: CREATE INDEX temp_index ON TABLE it.temp(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: it@temp -POSTHOOK: Output: it@it__temp_temp_index__ -PREHOOK: query: ALTER 
INDEX temp_index ON it.temp REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: it@temp -PREHOOK: Output: it@it__temp_temp_index__ -POSTHOOK: query: ALTER INDEX temp_index ON it.temp REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: it@temp -POSTHOOK: Output: it@it__temp_temp_index__ -POSTHOOK: Lineage: it__temp_temp_index__._bucketname SIMPLE [(temp)temp.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: it__temp_temp_index__._offsets EXPRESSION [(temp)temp.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: it__temp_temp_index__.key SIMPLE [(temp)temp.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM it.it__temp_temp_index__ WHERE key = 86 -PREHOOK: type: QUERY -PREHOOK: Input: it@it__temp_temp_index__ -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM it.it__temp_temp_index__ WHERE key = 86 -POSTHOOK: type: QUERY -POSTHOOK: Input: it@it__temp_temp_index__ -#### A masked pattern was here #### -PREHOOK: query: EXPLAIN SELECT * FROM it.temp WHERE key = 86 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT * FROM it.temp WHERE key = 86 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: temp - filterExpr: (UDFToDouble(key) = 86.0) (type: boolean) - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: key (type: string), val (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT * FROM it.temp WHERE key = 86 -PREHOOK: type: QUERY -PREHOOK: Input: it@temp -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM it.temp WHERE key = 86 -POSTHOOK: type: QUERY -POSTHOOK: Input: it@temp -#### A masked pattern was here #### -PREHOOK: query: DROP table it.temp -PREHOOK: type: DROPTABLE -PREHOOK: Input: it@temp -PREHOOK: Output: it@temp -POSTHOOK: query: DROP table it.temp -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: it@temp -POSTHOOK: Output: it@temp -PREHOOK: query: DROP DATABASE it -PREHOOK: type: DROPDATABASE -PREHOOK: Input: database:it -PREHOOK: Output: database:it -POSTHOOK: query: DROP DATABASE it -POSTHOOK: type: DROPDATABASE -POSTHOOK: Input: database:it -POSTHOOK: Output: database:it diff --git ql/src/test/results/clientpositive/index_auto_file_format.q.out ql/src/test/results/clientpositive/index_auto_file_format.q.out deleted file mode 100644 index 21c8085da6..0000000000 --- ql/src/test/results/clientpositive/index_auto_file_format.q.out +++ /dev/null @@ -1,256 +0,0 @@ -PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' 
WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index__ -PREHOOK: query: ALTER INDEX src_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src_index__ -POSTHOOK: query: ALTER INDEX src_index ON src REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index__ -POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__src_src_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key=86 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key=86 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-3 is a root stage - Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6 - Stage-5 - Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 - Stage-1 depends on stages: Stage-2 - Stage-4 - Stage-6 - Stage-7 depends on stages: Stage-6 - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - alias: default__src_src_index__ - filterExpr: (UDFToDouble(key) = 86.0) (type: boolean) - Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) - Select Operator - expressions: _bucketname (type: string), _offsets (type: array) - outputColumnNames: _col0, _col1 - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-8 - Conditional Operator - - Stage: Stage-5 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-2 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - filterExpr: (UDFToDouble(key) = 86.0) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-4 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-6 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false 
- table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-7 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT key, value FROM src WHERE key=86 -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM src WHERE key=86 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src_index__ -POSTHOOK: Input: default@src -#### A masked pattern was here #### -86 val_86 -PREHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key=86 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key=86 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-3 is a root stage - Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6 - Stage-5 - Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 - Stage-1 depends on stages: Stage-2 - Stage-4 - Stage-6 - Stage-7 depends on stages: Stage-6 - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - alias: default__src_src_index__ - filterExpr: (UDFToDouble(key) = 86.0) (type: boolean) - Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) - Select Operator - expressions: _bucketname (type: string), _offsets (type: array) - outputColumnNames: _col0, _col1 - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-8 - Conditional Operator - - Stage: Stage-5 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-2 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - filterExpr: (UDFToDouble(key) = 86.0) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-4 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-6 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-7 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT key, value FROM src WHERE key=86 -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM src WHERE key=86 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src_index__ -POSTHOOK: Input: default@src -#### A masked pattern was here #### -86 val_86 -PREHOOK: query: DROP INDEX src_index on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: DROP INDEX src_index on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src diff --git ql/src/test/results/clientpositive/index_auto_mult_tables.q.out ql/src/test/results/clientpositive/index_auto_mult_tables.q.out deleted file mode 100644 index d970b25404..0000000000 --- ql/src/test/results/clientpositive/index_auto_mult_tables.q.out +++ /dev/null @@ -1,438 +0,0 @@ -PREHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string) - TableScan - alias: b - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: string) - 1 _col0 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE - table: - input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -82 val_82 -82 val_82 -82 val_82 -82 val_82 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -85 val_85 -85 val_85 -85 val_85 -85 val_85 -86 val_86 -86 val_86 -86 val_86 -86 val_86 -87 val_87 -87 val_87 -87 val_87 -87 val_87 -PREHOOK: query: CREATE INDEX src_index_bitmap ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: CREATE INDEX src_index_bitmap ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index_bitmap__ -PREHOOK: query: ALTER INDEX src_index_bitmap ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src_index_bitmap__ -POSTHOOK: query: ALTER INDEX src_index_bitmap ON src REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index_bitmap__ -POSTHOOK: Lineage: default__src_src_index_bitmap__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src_index_bitmap__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__src_src_index_bitmap__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src_index_bitmap__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: CREATE INDEX srcpart_index_bitmap ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: CREATE INDEX srcpart_index_bitmap ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@srcpart -POSTHOOK: Output: default@default__srcpart_srcpart_index_bitmap__ -PREHOOK: query: ALTER INDEX srcpart_index_bitmap ON srcpart REBUILD 
-PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-08/hr=11 -PREHOOK: Output: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-08/hr=12 -PREHOOK: Output: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-09/hr=11 -PREHOOK: Output: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-09/hr=12 -POSTHOOK: query: ALTER INDEX srcpart_index_bitmap ON srcpart REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-09/hr=12 -POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-08,hr=11)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-08,hr=11)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-08,hr=12)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-08,hr=12)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-09,hr=11)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-09,hr=11)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE 
[(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-09,hr=12)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-09,hr=12)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-4 is a root stage - Stage-3 depends on stages: Stage-4 - Stage-1 depends on stages: Stage-3, Stage-5 - Stage-6 is a root stage - Stage-5 depends on stages: Stage-6 - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-4 - Map Reduce - Map Operator Tree: - TableScan - alias: default__src_src_index_bitmap__ - filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 90.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean) - Filter Operator - predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean) - Select Operator - expressions: _bucketname (type: string), _offset (type: bigint) - outputColumnNames: _bucketname, _offset - Group By Operator - aggregations: collect_set(_offset) - keys: _bucketname (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - value expressions: _col1 (type: array) - Reduce Operator Tree: - Group By Operator - aggregations: collect_set(VALUE._col0) - keys: KEY._col0 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1 - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-3 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: a - filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 90.0)) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num 
rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string) - TableScan - alias: b - filterExpr: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: string) - 1 _col0 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-6 - Map Reduce - Map Operator Tree: - TableScan - alias: default__srcpart_srcpart_index_bitmap__ - filterExpr: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean) - Filter Operator - predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean) - Select Operator - expressions: _bucketname (type: string), _offset (type: bigint) - outputColumnNames: _bucketname, _offset - Group By Operator - aggregations: collect_set(_offset) - keys: _bucketname (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - value expressions: _col1 (type: array) - Reduce Operator Tree: - Group By Operator - aggregations: collect_set(VALUE._col0) - keys: KEY._col0 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1 - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-5 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index_bitmap__ -PREHOOK: Input: default@default__srcpart_srcpart_index_bitmap__ -PREHOOK: Input: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-08/hr=11 -PREHOOK: Input: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-08/hr=12 -PREHOOK: Input: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-09/hr=11 -PREHOOK: Input: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-09/hr=12 -PREHOOK: Input: default@src 
-PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src_index_bitmap__ -POSTHOOK: Input: default@default__srcpart_srcpart_index_bitmap__ -POSTHOOK: Input: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-09/hr=12 -POSTHOOK: Input: default@src -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -82 val_82 -82 val_82 -82 val_82 -82 val_82 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -85 val_85 -85 val_85 -85 val_85 -85 val_85 -86 val_86 -86 val_86 -86 val_86 -86 val_86 -87 val_87 -87 val_87 -87 val_87 -87 val_87 -PREHOOK: query: DROP INDEX src_index_bitmap on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: DROP INDEX src_index_bitmap on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: DROP INDEX srcpart_index_bitmap on srcpart -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: DROP INDEX srcpart_index_bitmap on srcpart -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@srcpart diff --git ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out deleted file mode 100644 index 65eb0de5fe..0000000000 --- ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out +++ /dev/null @@ -1,485 +0,0 @@ -PREHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Reduce Output 
Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string) - TableScan - alias: b - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: string) - 1 _col0 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -82 val_82 -82 val_82 -82 val_82 -82 val_82 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -85 val_85 -85 val_85 -85 val_85 -85 val_85 -86 val_86 -86 val_86 -86 val_86 -86 val_86 -87 val_87 -87 val_87 -87 val_87 -87 val_87 -PREHOOK: query: CREATE INDEX src_index_compact ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: CREATE INDEX src_index_compact ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index_compact__ -PREHOOK: query: ALTER INDEX src_index_compact ON src REBUILD -PREHOOK: type: 
ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src_index_compact__ -POSTHOOK: query: ALTER INDEX src_index_compact ON src REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index_compact__ -POSTHOOK: Lineage: default__src_src_index_compact__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__src_src_index_compact__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src_index_compact__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: CREATE INDEX srcpart_index_compact ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: CREATE INDEX srcpart_index_compact ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@srcpart -POSTHOOK: Output: default@default__srcpart_srcpart_index_compact__ -PREHOOK: query: ALTER INDEX srcpart_index_compact ON srcpart REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@default__srcpart_srcpart_index_compact__@ds=2008-04-08/hr=11 -PREHOOK: Output: default@default__srcpart_srcpart_index_compact__@ds=2008-04-08/hr=12 -PREHOOK: Output: default@default__srcpart_srcpart_index_compact__@ds=2008-04-09/hr=11 -PREHOOK: Output: default@default__srcpart_srcpart_index_compact__@ds=2008-04-09/hr=12 -POSTHOOK: query: ALTER INDEX srcpart_index_compact ON srcpart REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@default__srcpart_srcpart_index_compact__@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@default__srcpart_srcpart_index_compact__@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@default__srcpart_srcpart_index_compact__@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@default__srcpart_srcpart_index_compact__@ds=2008-04-09/hr=12 -POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-08,hr=11)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-08,hr=12)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: 
default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-09,hr=11)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-09,hr=12)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-4 is a root stage - Stage-9 depends on stages: Stage-4 , consists of Stage-6, Stage-5, Stage-7 - Stage-6 - Stage-3 depends on stages: Stage-6, Stage-5, Stage-8 - Stage-1 depends on stages: Stage-3, Stage-10 - Stage-5 - Stage-7 - Stage-8 depends on stages: Stage-7 - Stage-11 is a root stage - Stage-16 depends on stages: Stage-11 , consists of Stage-13, Stage-12, Stage-14 - Stage-13 - Stage-10 depends on stages: Stage-13, Stage-12, Stage-15 - Stage-12 - Stage-14 - Stage-15 depends on stages: Stage-14 - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-4 - Map Reduce - Map Operator Tree: - TableScan - alias: default__src_src_index_compact__ - filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 90.0)) (type: boolean) - Filter Operator - predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Select Operator - expressions: _bucketname (type: string), _offsets (type: array) - outputColumnNames: _col0, _col1 - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-9 - Conditional Operator - - Stage: Stage-6 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-3 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: a - filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 90.0)) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Statistics: Num rows: 55 Data size: 584 Basic 
stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string) - TableScan - alias: b - filterExpr: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: string) - 1 _col0 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-5 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-7 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-8 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-11 - Map Reduce - Map Operator Tree: - TableScan - alias: default__srcpart_srcpart_index_compact__ - filterExpr: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Filter Operator - predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Select Operator - expressions: _bucketname (type: string), _offsets (type: array) - outputColumnNames: _col0, _col1 - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-16 - Conditional Operator - - Stage: Stage-13 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-10 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-12 - Map Reduce - Map Operator Tree: - 
TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-14 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-15 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index_compact__ -PREHOOK: Input: default@default__srcpart_srcpart_index_compact__ -PREHOOK: Input: default@default__srcpart_srcpart_index_compact__@ds=2008-04-08/hr=11 -PREHOOK: Input: default@default__srcpart_srcpart_index_compact__@ds=2008-04-08/hr=12 -PREHOOK: Input: default@default__srcpart_srcpart_index_compact__@ds=2008-04-09/hr=11 -PREHOOK: Input: default@default__srcpart_srcpart_index_compact__@ds=2008-04-09/hr=12 -PREHOOK: Input: default@src -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src_index_compact__ -POSTHOOK: Input: default@default__srcpart_srcpart_index_compact__ -POSTHOOK: Input: default@default__srcpart_srcpart_index_compact__@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@default__srcpart_srcpart_index_compact__@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@default__srcpart_srcpart_index_compact__@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@default__srcpart_srcpart_index_compact__@ds=2008-04-09/hr=12 -POSTHOOK: Input: default@src -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -82 val_82 -82 val_82 -82 val_82 -82 val_82 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -83 val_83 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -84 val_84 -85 val_85 -85 val_85 -85 val_85 -85 val_85 -86 val_86 -86 val_86 -86 val_86 -86 val_86 -87 val_87 -87 val_87 -87 val_87 -87 val_87 -PREHOOK: query: DROP INDEX src_index_compact on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: DROP INDEX src_index_compact on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: DROP INDEX srcpart_index_compact on srcpart -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: DROP 
INDEX srcpart_index_compact on srcpart -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@srcpart diff --git ql/src/test/results/clientpositive/index_auto_multiple.q.out ql/src/test/results/clientpositive/index_auto_multiple.q.out deleted file mode 100644 index dfc2f346a5..0000000000 --- ql/src/test/results/clientpositive/index_auto_multiple.q.out +++ /dev/null @@ -1,164 +0,0 @@ -PREHOOK: query: CREATE INDEX src_key_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: CREATE INDEX src_key_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_key_index__ -PREHOOK: query: CREATE INDEX src_val_index ON TABLE src(value) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: CREATE INDEX src_val_index ON TABLE src(value) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_val_index__ -PREHOOK: query: ALTER INDEX src_key_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src_key_index__ -POSTHOOK: query: ALTER INDEX src_key_index ON src REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_key_index__ -POSTHOOK: Lineage: default__src_src_key_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__src_src_key_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src_key_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: ALTER INDEX src_val_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src_val_index__ -POSTHOOK: query: ALTER INDEX src_val_index ON src REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_val_index__ -POSTHOOK: Lineage: default__src_src_val_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__src_src_val_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src_val_index__.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key=86 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key=86 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-3 is a root stage - Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6 - Stage-5 - Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 - Stage-1 depends on stages: Stage-2 - Stage-4 - Stage-6 - Stage-7 depends on stages: Stage-6 - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - alias: default__src_src_key_index__ - filterExpr: (UDFToDouble(key) = 86.0) (type: boolean) - Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) - Select Operator - expressions: _bucketname (type: string), _offsets (type: array) - outputColumnNames: _col0, _col1 - File Output 
Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-8 - Conditional Operator - - Stage: Stage-5 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-2 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - filterExpr: (UDFToDouble(key) = 86.0) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-4 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-6 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-7 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT key, value FROM src WHERE key=86 -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_key_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM src WHERE key=86 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src_key_index__ -POSTHOOK: Input: default@src -#### A masked pattern was here #### -86 val_86 -PREHOOK: query: DROP INDEX src_key_index ON src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: DROP INDEX src_key_index ON src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: DROP INDEX src_val_index ON src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: DROP INDEX src_val_index ON src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src diff --git ql/src/test/results/clientpositive/index_auto_partitioned.q.out ql/src/test/results/clientpositive/index_auto_partitioned.q.out deleted file mode 100644 index 8c2d6e4717..0000000000 --- ql/src/test/results/clientpositive/index_auto_partitioned.q.out +++ /dev/null @@ -1,172 +0,0 @@ -PREHOOK: query: CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: CREATE INDEX src_part_index ON TABLE srcpart(key) as 
'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@srcpart -POSTHOOK: Output: default@default__srcpart_src_part_index__ -PREHOOK: query: ALTER INDEX src_part_index ON srcpart REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=11 -PREHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=12 -PREHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=11 -PREHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=12 -POSTHOOK: query: ALTER INDEX src_part_index ON srcpart REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=12 -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=12)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=11)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=12)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: 
default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: EXPLAIN SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09' -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09' -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-3 is a root stage - Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6 - Stage-5 - Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 - Stage-1 depends on stages: Stage-2 - Stage-4 - Stage-6 - Stage-7 depends on stages: Stage-6 - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - alias: default__srcpart_src_part_index__ - filterExpr: ((UDFToDouble(key) = 86.0) and (ds = '2008-04-09')) (type: boolean) - Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) - Select Operator - expressions: _bucketname (type: string), _offsets (type: array) - outputColumnNames: _col0, _col1 - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-8 - Conditional Operator - - Stage: Stage-5 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-2 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: srcpart - filterExpr: ((UDFToDouble(key) = 86.0) and (ds = '2008-04-09')) (type: boolean) - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-4 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-6 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-7 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_src_part_index__ -PREHOOK: Input: 
default@default__srcpart_src_part_index__@ds=2008-04-09/hr=11 -PREHOOK: Input: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=12 -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_src_part_index__ -POSTHOOK: Input: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=12 -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -86 val_86 -86 val_86 -PREHOOK: query: DROP INDEX src_part_index ON srcpart -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: DROP INDEX src_part_index ON srcpart -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@srcpart diff --git ql/src/test/results/clientpositive/index_auto_self_join.q.out ql/src/test/results/clientpositive/index_auto_self_join.q.out deleted file mode 100644 index 08c851b319..0000000000 --- ql/src/test/results/clientpositive/index_auto_self_join.q.out +++ /dev/null @@ -1,295 +0,0 @@ -PREHOOK: query: EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0) and value is not null) (type: boolean) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col1 (type: string) - sort order: + - Map-reduce partition columns: _col1 (type: string) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string) - TableScan - alias: b - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 70.0) and value is not null) (type: boolean) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col1 (type: string) - sort order: + - Map-reduce partition columns: _col1 (type: string) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string) - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col1 (type: string) - 1 _col1 (type: string) - outputColumnNames: 
_col0, _col2 - Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col2 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -82 82 -83 83 -83 83 -83 83 -83 83 -84 84 -84 84 -84 84 -84 84 -85 85 -86 86 -87 87 -PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index__ -PREHOOK: query: ALTER INDEX src_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src_index__ -POSTHOOK: query: ALTER INDEX src_index ON src REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index__ -POSTHOOK: Lineage: default__src_src_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__src_src_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-4 is a root stage - Stage-3 depends on stages: Stage-4 - Stage-1 depends on stages: Stage-3, Stage-5 - Stage-6 is a root stage - Stage-5 depends on stages: Stage-6 - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-4 - Map Reduce - Map Operator Tree: - TableScan - alias: default__src_src_index__ - filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean) - Filter Operator - predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean) - Select Operator - expressions: _bucketname (type: string), _offset (type: bigint) - 
outputColumnNames: _bucketname, _offset - Group By Operator - aggregations: collect_set(_offset) - keys: _bucketname (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - value expressions: _col1 (type: array) - Reduce Operator Tree: - Group By Operator - aggregations: collect_set(VALUE._col0) - keys: KEY._col0 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1 - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-3 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: a - filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and value is not null) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0) and value is not null) (type: boolean) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col1 (type: string) - sort order: + - Map-reduce partition columns: _col1 (type: string) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string) - TableScan - alias: b - filterExpr: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and value is not null) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 70.0) and value is not null) (type: boolean) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col1 (type: string) - sort order: + - Map-reduce partition columns: _col1 (type: string) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string) - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col1 (type: string) - 1 _col1 (type: string) - outputColumnNames: _col0, _col2 - Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col2 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-6 - Map Reduce - Map Operator Tree: - TableScan - alias: 
default__src_src_index__ - filterExpr: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean) - Filter Operator - predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 70.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean) - Select Operator - expressions: _bucketname (type: string), _offset (type: bigint) - outputColumnNames: _bucketname, _offset - Group By Operator - aggregations: collect_set(_offset) - keys: _bucketname (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - value expressions: _col1 (type: array) - Reduce Operator Tree: - Group By Operator - aggregations: collect_set(VALUE._col0) - keys: KEY._col0 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1 - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-5 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src_index__ -POSTHOOK: Input: default@src -#### A masked pattern was here #### -82 82 -83 83 -83 83 -83 83 -83 83 -84 84 -84 84 -84 84 -84 84 -85 85 -86 86 -87 87 -PREHOOK: query: DROP INDEX src_index on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: DROP INDEX src_index on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src diff --git ql/src/test/results/clientpositive/index_auto_unused.q.out ql/src/test/results/clientpositive/index_auto_unused.q.out deleted file mode 100644 index a960d96ea1..0000000000 --- ql/src/test/results/clientpositive/index_auto_unused.q.out +++ /dev/null @@ -1,388 +0,0 @@ -PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index__ -PREHOOK: query: ALTER INDEX src_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src_index__ -POSTHOOK: query: ALTER INDEX src_index ON src REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index__ -POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__src_src_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, 
comment:default), ] -PREHOOK: query: EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT * FROM src WHERE key > 80 AND key < 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM src WHERE key > 80 AND key < 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -82 val_82 -83 val_83 -83 val_83 -84 val_84 -84 val_84 -85 val_85 -86 val_86 -87 val_87 -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 -98 val_98 -PREHOOK: query: EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT * FROM src WHERE key > 80 AND key < 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM src WHERE key > 80 AND key < 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -82 val_82 -83 val_83 -83 val_83 -84 val_84 
-84 val_84 -85 val_85 -86 val_86 -87 val_87 -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 -98 val_98 -PREHOOK: query: EXPLAIN SELECT * FROM src WHERE key < 10 OR key > 480 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT * FROM src WHERE key < 10 OR key > 480 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - filterExpr: ((UDFToDouble(key) < 10.0) or (UDFToDouble(key) > 480.0)) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 10.0) or (UDFToDouble(key) > 480.0)) (type: boolean) - Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT * FROM src WHERE key < 10 OR key > 480 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM src WHERE key < 10 OR key > 480 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -2 val_2 -4 val_4 -481 val_481 -482 val_482 -483 val_483 -484 val_484 -485 val_485 -487 val_487 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -490 val_490 -491 val_491 -492 val_492 -492 val_492 -493 val_493 -494 val_494 -495 val_495 -496 val_496 -497 val_497 -498 val_498 -498 val_498 -498 val_498 -5 val_5 -5 val_5 -5 val_5 -8 val_8 -9 val_9 -PREHOOK: query: DROP INDEX src_index on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: DROP INDEX src_index on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: CREATE INDEX src_val_index ON TABLE src(value) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: CREATE INDEX src_val_index ON TABLE src(value) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_val_index__ -PREHOOK: query: ALTER INDEX src_val_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src_val_index__ -POSTHOOK: query: ALTER INDEX src_val_index ON src REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_val_index__ -POSTHOOK: Lineage: default__src_src_val_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__src_src_val_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src_val_index__.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: 
EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT * FROM src WHERE key > 80 AND key < 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM src WHERE key > 80 AND key < 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -82 val_82 -83 val_83 -83 val_83 -84 val_84 -84 val_84 -85 val_85 -86 val_86 -87 val_87 -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 -98 val_98 -PREHOOK: query: DROP INDEX src_val_index on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: DROP INDEX src_val_index on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@srcpart -POSTHOOK: Output: default@default__srcpart_src_part_index__ -PREHOOK: query: ALTER INDEX src_part_index ON srcpart PARTITION (ds='2008-04-08', hr=11) REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=11 -POSTHOOK: query: ALTER INDEX src_part_index ON srcpart PARTITION (ds='2008-04-08', hr=11) REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=11 -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, 
type:string, comment:default), ] -PREHOOK: query: EXPLAIN SELECT * FROM srcpart WHERE ds='2008-04-09' AND hr=12 AND key < 10 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT * FROM srcpart WHERE ds='2008-04-09' AND hr=12 AND key < 10 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - TableScan - alias: srcpart - filterExpr: ((ds = '2008-04-09') and (12.0 = 12.0) and (UDFToDouble(key) < 10.0)) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (UDFToDouble(key) < 10.0) (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string), '2008-04-09' (type: string), hr (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - ListSink - -PREHOOK: query: SELECT * FROM srcpart WHERE ds='2008-04-09' AND hr=12 AND key < 10 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM srcpart WHERE ds='2008-04-09' AND hr=12 AND key < 10 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -0 val_0 2008-04-09 12 -0 val_0 2008-04-09 12 -0 val_0 2008-04-09 12 -2 val_2 2008-04-09 12 -4 val_4 2008-04-09 12 -5 val_5 2008-04-09 12 -5 val_5 2008-04-09 12 -5 val_5 2008-04-09 12 -8 val_8 2008-04-09 12 -9 val_9 2008-04-09 12 -PREHOOK: query: DROP INDEX src_part_index on srcpart -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: DROP INDEX src_part_index on srcpart -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@srcpart diff --git ql/src/test/results/clientpositive/index_auto_update.q.out ql/src/test/results/clientpositive/index_auto_update.q.out deleted file mode 100644 index e48b657434..0000000000 --- ql/src/test/results/clientpositive/index_auto_update.q.out +++ /dev/null @@ -1,353 +0,0 @@ -PREHOOK: query: CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@temp -POSTHOOK: query: CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@temp -PREHOOK: query: INSERT OVERWRITE TABLE temp SELECT * FROM src WHERE key < 50 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@temp -POSTHOOK: query: INSERT OVERWRITE TABLE temp SELECT * FROM src WHERE key < 50 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@temp -POSTHOOK: Lineage: temp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: temp.val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@temp -POSTHOOK: query: CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@temp -POSTHOOK: Output: default@default__temp_temp_index__ -PREHOOK: query: ALTER INDEX temp_index ON temp REBUILD -PREHOOK: type: 
ALTERINDEX_REBUILD -PREHOOK: Input: default@temp -PREHOOK: Output: default@default__temp_temp_index__ -POSTHOOK: query: ALTER INDEX temp_index ON temp REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@temp -POSTHOOK: Output: default@default__temp_temp_index__ -POSTHOOK: Lineage: default__temp_temp_index__._bucketname SIMPLE [(temp)temp.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__temp_temp_index__._offsets EXPRESSION [(temp)temp.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__temp_temp_index__.key SIMPLE [(temp)temp.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE temp SELECT * FROM src -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE temp SELECT * FROM src -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-6 is a root stage - Stage-12 depends on stages: Stage-6 , consists of Stage-9, Stage-8, Stage-10 - Stage-9 - Stage-0 depends on stages: Stage-9, Stage-8, Stage-11 - Stage-2 depends on stages: Stage-0 - Stage-1 depends on stages: Stage-2 - Stage-3 depends on stages: Stage-1, Stage-4, Stage-5 - Stage-4 depends on stages: Stage-2 - Stage-7 depends on stages: Stage-0, Stage-4, Stage-5 - Stage-5 depends on stages: Stage-2 - Stage-8 - Stage-10 - Stage-11 depends on stages: Stage-10 - -STAGE PLANS: - Stage: Stage-6 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.temp - Select Operator - expressions: _col0 (type: string), _col1 (type: string) - outputColumnNames: key, val - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: compute_stats(key, 'hll'), compute_stats(val, 'hll') - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: struct), _col1 (type: struct) - Reduce Operator Tree: - Group By Operator - aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-12 - Conditional Operator - - Stage: Stage-9 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Move Operator - tables: - replace: true - table: - input format: 
org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.temp - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - alias: temp - Select Operator - expressions: key (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) - outputColumnNames: key, BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME - Group By Operator - aggregations: collect_set(BLOCK__OFFSET__INSIDE__FILE) - keys: key (type: string), INPUT__FILE__NAME (type: string) - mode: hash - outputColumnNames: _col0, _col1, _col2 - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: string) - value expressions: _col2 (type: array) - Reduce Operator Tree: - Group By Operator - aggregations: collect_set(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1, _col2 - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.default__temp_temp_index__ - - Stage: Stage-1 - Move Operator - tables: - replace: true - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.default__temp_temp_index__ - - Stage: Stage-3 - Stats Work - Basic Stats Work: - - Stage: Stage-4 - - Stage: Stage-7 - Stats Work - Basic Stats Work: - Column Stats Desc: - Columns: key, val - Column Types: string, string - Table: default.temp - - Stage: Stage-5 - - Stage: Stage-8 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.temp - - Stage: Stage-10 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.temp - - Stage: Stage-11 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - -PREHOOK: query: INSERT OVERWRITE TABLE temp SELECT * FROM src -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Input: default@temp -PREHOOK: Output: default@temp -POSTHOOK: query: INSERT OVERWRITE TABLE temp SELECT * FROM src -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Input: default@temp -POSTHOOK: Output: default@temp -POSTHOOK: Lineage: default__temp_temp_index__._bucketname SIMPLE [(temp)temp.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__temp_temp_index__._offsets EXPRESSION [(temp)temp.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__temp_temp_index__.key SIMPLE [(temp)temp.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: temp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] 
-POSTHOOK: Lineage: temp.val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: EXPLAIN SELECT * FROM temp WHERE key = 86 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT * FROM temp WHERE key = 86 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-3 is a root stage - Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6 - Stage-5 - Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 - Stage-1 depends on stages: Stage-2 - Stage-4 - Stage-6 - Stage-7 depends on stages: Stage-6 - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - alias: default__temp_temp_index__ - filterExpr: (UDFToDouble(key) = 86.0) (type: boolean) - Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) - Select Operator - expressions: _bucketname (type: string), _offsets (type: array) - outputColumnNames: _col0, _col1 - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-8 - Conditional Operator - - Stage: Stage-5 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-2 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: temp - filterExpr: (UDFToDouble(key) = 86.0) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), val (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-4 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-6 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-7 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT * FROM temp WHERE key = 86 -PREHOOK: type: QUERY -PREHOOK: Input: default@default__temp_temp_index__ -PREHOOK: Input: default@temp -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM temp WHERE key = 86 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__temp_temp_index__ -POSTHOOK: Input: default@temp -#### A masked pattern was here #### -86 val_86 
-PREHOOK: query: drop index temp_index on temp -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@temp -POSTHOOK: query: drop index temp_index on temp -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@temp -PREHOOK: query: DROP table temp -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@temp -PREHOOK: Output: default@temp -POSTHOOK: query: DROP table temp -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@temp -POSTHOOK: Output: default@temp diff --git ql/src/test/results/clientpositive/index_bitmap.q.out ql/src/test/results/clientpositive/index_bitmap.q.out deleted file mode 100644 index 5017027225..0000000000 --- ql/src/test/results/clientpositive/index_bitmap.q.out +++ /dev/null @@ -1,291 +0,0 @@ -PREHOOK: query: DROP INDEX srcpart_index_proj on srcpart -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: DROP INDEX srcpart_index_proj on srcpart -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@srcpart -PREHOOK: query: EXPLAIN -CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -POSTHOOK: query: EXPLAIN -CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - -PREHOOK: query: CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@srcpart -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__ -PREHOOK: query: ALTER INDEX srcpart_index_proj ON srcpart REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11 -PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12 -POSTHOOK: query: ALTER INDEX srcpart_index_proj ON srcpart REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12 -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ 
PARTITION(ds=2008-04-08,hr=11)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT x.* FROM default__srcpart_srcpart_index_proj__ x WHERE x.ds = '2008-04-08' and x.hr = 11 -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__ -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -POSTHOOK: query: SELECT x.* FROM default__srcpart_srcpart_index_proj__ x WHERE x.ds = '2008-04-08' and x.hr = 11 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__ -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_srcpart_index_proj__ -x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' GROUP BY `_bucketname` -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__ -PREHOOK: Input: 
default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_srcpart_index_proj__ -x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' GROUP BY `_bucketname` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__ -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -100 val_100 -100 val_100 -100 val_100 -100 val_100 -#### A masked pattern was here #### -COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_srcpart_index_proj__ -x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' and x.hr = 11 GROUP BY `_bucketname` -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__ -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_srcpart_index_proj__ -x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' and x.hr = 11 GROUP BY `_bucketname` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__ -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -100 val_100 -100 val_100 -PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -100 val_100 -100 val_100 -PREHOOK: query: DROP INDEX srcpart_index_proj on srcpart -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: DROP INDEX srcpart_index_proj on srcpart -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@srcpart -PREHOOK: query: EXPLAIN -CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -POSTHOOK: query: 
EXPLAIN -CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - -PREHOOK: query: CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@srcpart -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__ -PREHOOK: query: ALTER INDEX srcpart_index_proj ON srcpart REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11 -PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12 -POSTHOOK: query: ALTER INDEX srcpart_index_proj ON srcpart REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12 -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ 
PARTITION(ds=2008-04-09,hr=11)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT x.* FROM default__srcpart_srcpart_index_proj__ x -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__ -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11 -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT x.* FROM default__srcpart_srcpart_index_proj__ x -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__ -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_srcpart_index_proj__ -WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND key=100 GROUP BY `_bucketname` -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__ -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11 -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_srcpart_index_proj__ -WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND key=100 GROUP BY `_bucketname` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__ -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -POSTHOOK: Input: 
default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -PREHOOK: query: DROP INDEX srcpart_index_proj on srcpart -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: DROP INDEX srcpart_index_proj on srcpart -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@srcpart diff --git ql/src/test/results/clientpositive/index_bitmap1.q.out ql/src/test/results/clientpositive/index_bitmap1.q.out deleted file mode 100644 index 8f3af66ed5..0000000000 --- ql/src/test/results/clientpositive/index_bitmap1.q.out +++ /dev/null @@ -1,75 +0,0 @@ -PREHOOK: query: EXPLAIN -CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -POSTHOOK: query: EXPLAIN -CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - -PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index__ -PREHOOK: query: ALTER INDEX src_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src_index__ -POSTHOOK: query: ALTER INDEX src_index ON src REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index__ -POSTHOOK: Lineage: default__src_src_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, 
comment:), ] -POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__src_src_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT x.* FROM default__src_src_index__ x -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -#### A masked pattern was here #### -POSTHOOK: query: SELECT x.* FROM default__src_src_index__ x -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src_index__ -#### A masked pattern was here #### -COLLECT_SET(`_offset`) as `_offsets` FROM default__src_src_index__ WHERE NOT -EWAH_BITMAP_EMPTY(`_bitmaps`) AND key=100 GROUP BY `_bucketname` -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -#### A masked pattern was here #### -COLLECT_SET(`_offset`) as `_offsets` FROM default__src_src_index__ WHERE NOT -EWAH_BITMAP_EMPTY(`_bitmaps`) AND key=100 GROUP BY `_bucketname` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src_index__ -#### A masked pattern was here #### -PREHOOK: query: SELECT key, value FROM src WHERE key=100 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM src WHERE key=100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -100 val_100 -100 val_100 -PREHOOK: query: SELECT key, value FROM src WHERE key=100 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM src WHERE key=100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -100 val_100 -100 val_100 -PREHOOK: query: DROP INDEX src_index ON src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: DROP INDEX src_index ON src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src diff --git ql/src/test/results/clientpositive/index_bitmap2.q.out ql/src/test/results/clientpositive/index_bitmap2.q.out deleted file mode 100644 index 716e3c8940..0000000000 --- ql/src/test/results/clientpositive/index_bitmap2.q.out +++ /dev/null @@ -1,138 +0,0 @@ -PREHOOK: query: EXPLAIN -CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -POSTHOOK: query: EXPLAIN -CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - -PREHOOK: query: EXPLAIN -CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -POSTHOOK: query: EXPLAIN -CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - -PREHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src1_index__ -PREHOOK: query: CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: 
Input: default@src -POSTHOOK: query: CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src2_index__ -PREHOOK: query: ALTER INDEX src1_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src1_index__ -POSTHOOK: query: ALTER INDEX src1_index ON src REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src1_index__ -POSTHOOK: Lineage: default__src_src1_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src1_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__src_src1_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src1_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: ALTER INDEX src2_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src2_index__ -POSTHOOK: query: ALTER INDEX src2_index ON src REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src2_index__ -POSTHOOK: Lineage: default__src_src2_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src2_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__src_src2_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src2_index__.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT * FROM default__src_src1_index__ -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src1_index__ -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM default__src_src1_index__ -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src1_index__ -#### A masked pattern was here #### -PREHOOK: query: SELECT * FROM default__src_src2_index__ -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src2_index__ -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM default__src_src2_index__ -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src2_index__ -#### A masked pattern was here #### -SELECT t.bucketname as `_bucketname`, COLLECT_SET(t.offset) AS `_offsets` FROM - (SELECT `_bucketname` AS bucketname, `_offset` AS offset - FROM default__src_src1_index__ - WHERE key = 0 AND NOT EWAH_BITMAP_EMPTY(`_bitmaps`) UNION ALL - SELECT `_bucketname` AS bucketname, `_offset` AS offset - FROM default__src_src2_index__ - WHERE value = "val2" AND NOT EWAH_BITMAP_EMPTY(`_bitmaps`)) t -GROUP BY t.bucketname -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src1_index__ -PREHOOK: Input: default@default__src_src2_index__ -#### A masked pattern was here #### -SELECT t.bucketname as `_bucketname`, COLLECT_SET(t.offset) AS `_offsets` FROM - (SELECT `_bucketname` AS bucketname, `_offset` AS offset - FROM default__src_src1_index__ - WHERE key = 0 AND NOT EWAH_BITMAP_EMPTY(`_bitmaps`) UNION ALL - SELECT `_bucketname` AS bucketname, `_offset` 
AS offset - FROM default__src_src2_index__ - WHERE value = "val2" AND NOT EWAH_BITMAP_EMPTY(`_bitmaps`)) t -GROUP BY t.bucketname -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src1_index__ -POSTHOOK: Input: default@default__src_src2_index__ -#### A masked pattern was here #### -PREHOOK: query: SELECT key, value FROM src WHERE key=0 OR value = "val_2" -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM src WHERE key=0 OR value = "val_2" -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -2 val_2 -PREHOOK: query: SELECT key, value FROM src WHERE key=0 OR value = "val_2" -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM src WHERE key=0 OR value = "val_2" -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -2 val_2 -PREHOOK: query: DROP INDEX src1_index ON src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: DROP INDEX src1_index ON src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: DROP INDEX src2_index ON src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: DROP INDEX src2_index ON src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src diff --git ql/src/test/results/clientpositive/index_bitmap3.q.out ql/src/test/results/clientpositive/index_bitmap3.q.out deleted file mode 100644 index 87c9c3695c..0000000000 --- ql/src/test/results/clientpositive/index_bitmap3.q.out +++ /dev/null @@ -1,1262 +0,0 @@ -PREHOOK: query: EXPLAIN -CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -POSTHOOK: query: EXPLAIN -CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - -PREHOOK: query: EXPLAIN -CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -POSTHOOK: query: EXPLAIN -CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - -PREHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src1_index__ -PREHOOK: query: CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src2_index__ -PREHOOK: query: ALTER INDEX src1_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src1_index__ -POSTHOOK: query: ALTER INDEX src1_index ON src REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src1_index__ -POSTHOOK: Lineage: default__src_src1_index__._bitmaps EXPRESSION 
[(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src1_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__src_src1_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src1_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: ALTER INDEX src2_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src2_index__ -POSTHOOK: query: ALTER INDEX src2_index ON src REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src2_index__ -POSTHOOK: Lineage: default__src_src2_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src2_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__src_src2_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src2_index__.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT * FROM default__src_src1_index__ -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src1_index__ -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT * FROM default__src_src1_index__ -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src1_index__ -POSTHOOK: Output: hdfs://### HDFS PATH ### -0 hdfs://### HDFS PATH ### 2088 [1,2,4,8589934592,1,0] -0 hdfs://### HDFS PATH ### 2632 [1,2,4,8589934592,1,0] -0 hdfs://### HDFS PATH ### 968 [1,2,4,8589934592,1,0] -10 hdfs://### HDFS PATH ### 2846 [1,2,4,8589934592,1,0] -100 hdfs://### HDFS PATH ### 2156 [1,2,4,8589934592,1,0] -100 hdfs://### HDFS PATH ### 5374 [1,2,4,8589934592,1,0] -103 hdfs://### HDFS PATH ### 1484 [1,2,4,8589934592,1,0] -103 hdfs://### HDFS PATH ### 3614 [1,2,4,8589934592,1,0] -104 hdfs://### HDFS PATH ### 4114 [1,2,4,8589934592,1,0] -104 hdfs://### HDFS PATH ### 4628 [1,2,4,8589934592,1,0] -105 hdfs://### HDFS PATH ### 4196 [1,2,4,8589934592,1,0] -11 hdfs://### HDFS PATH ### 3170 [1,2,4,8589934592,1,0] -111 hdfs://### HDFS PATH ### 1186 [1,2,4,8589934592,1,0] -113 hdfs://### HDFS PATH ### 3638 [1,2,4,8589934592,1,0] -113 hdfs://### HDFS PATH ### 920 [1,2,4,8589934592,1,0] -114 hdfs://### HDFS PATH ### 4280 [1,2,4,8589934592,1,0] -116 hdfs://### HDFS PATH ### 3746 [1,2,4,8589934592,1,0] -118 hdfs://### HDFS PATH ### 2686 [1,2,4,8589934592,1,0] -118 hdfs://### HDFS PATH ### 2780 [1,2,4,8589934592,1,0] -119 hdfs://### HDFS PATH ### 2064 [1,2,4,8589934592,1,0] -119 hdfs://### HDFS PATH ### 3332 [1,2,4,8589934592,1,0] -119 hdfs://### HDFS PATH ### 4674 [1,2,4,8589934592,1,0] -12 hdfs://### HDFS PATH ### 1720 [1,2,4,8589934592,1,0] -12 hdfs://### HDFS PATH ### 4362 [1,2,4,8589934592,1,0] -120 hdfs://### HDFS PATH ### 2284 [1,2,4,8589934592,1,0] -120 hdfs://### HDFS PATH ### 4830 [1,2,4,8589934592,1,0] -125 hdfs://### HDFS PATH ### 1344 [1,2,4,8589934592,1,0] -125 hdfs://### HDFS PATH ### 4468 [1,2,4,8589934592,1,0] -126 hdfs://### HDFS PATH ### 5732 [1,2,4,8589934592,1,0] -128 hdfs://### HDFS PATH ### 208 [1,2,4,8589934592,1,0] -128 hdfs://### HDFS PATH ### 3896 [1,2,4,8589934592,1,0] -128 hdfs://### HDFS PATH ### 988 
[1,2,4,8589934592,1,0] -129 hdfs://### HDFS PATH ### 1094 [1,2,4,8589934592,1,0] -129 hdfs://### HDFS PATH ### 2040 [1,2,4,8589934592,1,0] -131 hdfs://### HDFS PATH ### 2296 [1,2,4,8589934592,1,0] -133 hdfs://### HDFS PATH ### 5164 [1,2,4,8589934592,1,0] -134 hdfs://### HDFS PATH ### 2698 [1,2,4,8589934592,1,0] -134 hdfs://### HDFS PATH ### 5294 [1,2,4,8589934592,1,0] -136 hdfs://### HDFS PATH ### 5080 [1,2,4,8589934592,1,0] -137 hdfs://### HDFS PATH ### 1650 [1,2,4,8589934592,1,0] -137 hdfs://### HDFS PATH ### 2552 [1,2,4,8589934592,1,0] -138 hdfs://### HDFS PATH ### 1472 [1,2,4,8589934592,1,0] -138 hdfs://### HDFS PATH ### 1848 [1,2,4,8589934592,1,0] -138 hdfs://### HDFS PATH ### 2734 [1,2,4,8589934592,1,0] -138 hdfs://### HDFS PATH ### 3470 [1,2,4,8589934592,1,0] -143 hdfs://### HDFS PATH ### 3226 [1,2,4,8589934592,1,0] -145 hdfs://### HDFS PATH ### 304 [1,2,4,8589934592,1,0] -146 hdfs://### HDFS PATH ### 232 [1,2,4,8589934592,1,0] -146 hdfs://### HDFS PATH ### 5430 [1,2,4,8589934592,1,0] -149 hdfs://### HDFS PATH ### 1058 [1,2,4,8589934592,1,0] -149 hdfs://### HDFS PATH ### 3422 [1,2,4,8589934592,1,0] -15 hdfs://### HDFS PATH ### 2770 [1,2,4,8589934592,1,0] -15 hdfs://### HDFS PATH ### 386 [1,2,4,8589934592,1,0] -150 hdfs://### HDFS PATH ### 150 [1,2,4,8589934592,1,0] -152 hdfs://### HDFS PATH ### 280 [1,2,4,8589934592,1,0] -152 hdfs://### HDFS PATH ### 5648 [1,2,4,8589934592,1,0] -153 hdfs://### HDFS PATH ### 502 [1,2,4,8589934592,1,0] -155 hdfs://### HDFS PATH ### 932 [1,2,4,8589934592,1,0] -156 hdfs://### HDFS PATH ### 2352 [1,2,4,8589934592,1,0] -157 hdfs://### HDFS PATH ### 1140 [1,2,4,8589934592,1,0] -158 hdfs://### HDFS PATH ### 2052 [1,2,4,8589934592,1,0] -160 hdfs://### HDFS PATH ### 3274 [1,2,4,8589934592,1,0] -162 hdfs://### HDFS PATH ### 754 [1,2,4,8589934592,1,0] -163 hdfs://### HDFS PATH ### 4650 [1,2,4,8589934592,1,0] -164 hdfs://### HDFS PATH ### 4408 [1,2,4,8589934592,1,0] -164 hdfs://### HDFS PATH ### 4492 [1,2,4,8589934592,1,0] -165 hdfs://### HDFS PATH ### 2236 [1,2,4,8589934592,1,0] -165 hdfs://### HDFS PATH ### 44 [1,2,4,8589934592,1,0] -166 hdfs://### HDFS PATH ### 418 [1,2,4,8589934592,1,0] -167 hdfs://### HDFS PATH ### 3686 [1,2,4,8589934592,1,0] -167 hdfs://### HDFS PATH ### 5502 [1,2,4,8589934592,1,0] -167 hdfs://### HDFS PATH ### 874 [1,2,4,8589934592,1,0] -168 hdfs://### HDFS PATH ### 3180 [1,2,4,8589934592,1,0] -169 hdfs://### HDFS PATH ### 1308 [1,2,4,8589934592,1,0] -169 hdfs://### HDFS PATH ### 2588 [1,2,4,8589934592,1,0] -169 hdfs://### HDFS PATH ### 4854 [1,2,4,8589934592,1,0] -169 hdfs://### HDFS PATH ### 5754 [1,2,4,8589934592,1,0] -17 hdfs://### HDFS PATH ### 910 [1,2,4,8589934592,1,0] -170 hdfs://### HDFS PATH ### 1106 [1,2,4,8589934592,1,0] -172 hdfs://### HDFS PATH ### 2018 [1,2,4,8589934592,1,0] -172 hdfs://### HDFS PATH ### 5104 [1,2,4,8589934592,1,0] -174 hdfs://### HDFS PATH ### 598 [1,2,4,8589934592,1,0] -174 hdfs://### HDFS PATH ### 682 [1,2,4,8589934592,1,0] -175 hdfs://### HDFS PATH ### 4150 [1,2,4,8589934592,1,0] -175 hdfs://### HDFS PATH ### 5176 [1,2,4,8589934592,1,0] -176 hdfs://### HDFS PATH ### 1428 [1,2,4,8589934592,1,0] -176 hdfs://### HDFS PATH ### 1556 [1,2,4,8589934592,1,0] -177 hdfs://### HDFS PATH ### 3036 [1,2,4,8589934592,1,0] -178 hdfs://### HDFS PATH ### 4938 [1,2,4,8589934592,1,0] -179 hdfs://### HDFS PATH ### 2006 [1,2,4,8589934592,1,0] -179 hdfs://### HDFS PATH ### 2674 [1,2,4,8589934592,1,0] -18 hdfs://### HDFS PATH ### 5340 [1,2,4,8589934592,1,0] -18 hdfs://### HDFS PATH ### 5514 [1,2,4,8589934592,1,0] -180 
hdfs://### HDFS PATH ### 1696 [1,2,4,8589934592,1,0] -181 hdfs://### HDFS PATH ### 1742 [1,2,4,8589934592,1,0] -183 hdfs://### HDFS PATH ### 5536 [1,2,4,8589934592,1,0] -186 hdfs://### HDFS PATH ### 5466 [1,2,4,8589934592,1,0] -187 hdfs://### HDFS PATH ### 1416 [1,2,4,8589934592,1,0] -187 hdfs://### HDFS PATH ### 2492 [1,2,4,8589934592,1,0] -187 hdfs://### HDFS PATH ### 4516 [1,2,4,8589934592,1,0] -189 hdfs://### HDFS PATH ### 5188 [1,2,4,8589934592,1,0] -19 hdfs://### HDFS PATH ### 2824 [1,2,4,8589934592,1,0] -190 hdfs://### HDFS PATH ### 4244 [1,2,4,8589934592,1,0] -191 hdfs://### HDFS PATH ### 2192 [1,2,4,8589934592,1,0] -191 hdfs://### HDFS PATH ### 3852 [1,2,4,8589934592,1,0] -192 hdfs://### HDFS PATH ### 1392 [1,2,4,8589934592,1,0] -193 hdfs://### HDFS PATH ### 126 [1,2,4,8589934592,1,0] -193 hdfs://### HDFS PATH ### 4078 [1,2,4,8589934592,1,0] -193 hdfs://### HDFS PATH ### 514 [1,2,4,8589934592,1,0] -194 hdfs://### HDFS PATH ### 5684 [1,2,4,8589934592,1,0] -195 hdfs://### HDFS PATH ### 3286 [1,2,4,8589934592,1,0] -195 hdfs://### HDFS PATH ### 886 [1,2,4,8589934592,1,0] -196 hdfs://### HDFS PATH ### 2410 [1,2,4,8589934592,1,0] -197 hdfs://### HDFS PATH ### 2108 [1,2,4,8589934592,1,0] -197 hdfs://### HDFS PATH ### 2480 [1,2,4,8589934592,1,0] -199 hdfs://### HDFS PATH ### 2180 [1,2,4,8589934592,1,0] -199 hdfs://### HDFS PATH ### 4806 [1,2,4,8589934592,1,0] -199 hdfs://### HDFS PATH ### 646 [1,2,4,8589934592,1,0] -2 hdfs://### HDFS PATH ### 4004 [1,2,4,8589934592,1,0] -20 hdfs://### HDFS PATH ### 1118 [1,2,4,8589934592,1,0] -200 hdfs://### HDFS PATH ### 4698 [1,2,4,8589934592,1,0] -200 hdfs://### HDFS PATH ### 5790 [1,2,4,8589934592,1,0] -201 hdfs://### HDFS PATH ### 4384 [1,2,4,8589934592,1,0] -202 hdfs://### HDFS PATH ### 3932 [1,2,4,8589934592,1,0] -203 hdfs://### HDFS PATH ### 4314 [1,2,4,8589934592,1,0] -203 hdfs://### HDFS PATH ### 944 [1,2,4,8589934592,1,0] -205 hdfs://### HDFS PATH ### 1046 [1,2,4,8589934592,1,0] -205 hdfs://### HDFS PATH ### 2272 [1,2,4,8589934592,1,0] -207 hdfs://### HDFS PATH ### 5022 [1,2,4,8589934592,1,0] -207 hdfs://### HDFS PATH ### 634 [1,2,4,8589934592,1,0] -208 hdfs://### HDFS PATH ### 1272 [1,2,4,8589934592,1,0] -208 hdfs://### HDFS PATH ### 1948 [1,2,4,8589934592,1,0] -208 hdfs://### HDFS PATH ### 670 [1,2,4,8589934592,1,0] -209 hdfs://### HDFS PATH ### 3504 [1,2,4,8589934592,1,0] -209 hdfs://### HDFS PATH ### 374 [1,2,4,8589934592,1,0] -213 hdfs://### HDFS PATH ### 1508 [1,2,4,8589934592,1,0] -213 hdfs://### HDFS PATH ### 220 [1,2,4,8589934592,1,0] -214 hdfs://### HDFS PATH ### 5116 [1,2,4,8589934592,1,0] -216 hdfs://### HDFS PATH ### 1520 [1,2,4,8589934592,1,0] -216 hdfs://### HDFS PATH ### 3650 [1,2,4,8589934592,1,0] -217 hdfs://### HDFS PATH ### 1860 [1,2,4,8589934592,1,0] -217 hdfs://### HDFS PATH ### 4396 [1,2,4,8589934592,1,0] -218 hdfs://### HDFS PATH ### 3446 [1,2,4,8589934592,1,0] -219 hdfs://### HDFS PATH ### 3710 [1,2,4,8589934592,1,0] -219 hdfs://### HDFS PATH ### 478 [1,2,4,8589934592,1,0] -221 hdfs://### HDFS PATH ### 1164 [1,2,4,8589934592,1,0] -221 hdfs://### HDFS PATH ### 1580 [1,2,4,8589934592,1,0] -222 hdfs://### HDFS PATH ### 5720 [1,2,4,8589934592,1,0] -223 hdfs://### HDFS PATH ### 3398 [1,2,4,8589934592,1,0] -223 hdfs://### HDFS PATH ### 3758 [1,2,4,8589934592,1,0] -224 hdfs://### HDFS PATH ### 174 [1,2,4,8589934592,1,0] -224 hdfs://### HDFS PATH ### 2892 [1,2,4,8589934592,1,0] -226 hdfs://### HDFS PATH ### 3048 [1,2,4,8589934592,1,0] -228 hdfs://### HDFS PATH ### 3458 [1,2,4,8589934592,1,0] -229 hdfs://### HDFS PATH ### 3202 
[1,2,4,8589934592,1,0] -229 hdfs://### HDFS PATH ### 3956 [1,2,4,8589934592,1,0] -230 hdfs://### HDFS PATH ### 1730 [1,2,4,8589934592,1,0] -230 hdfs://### HDFS PATH ### 1936 [1,2,4,8589934592,1,0] -230 hdfs://### HDFS PATH ### 2260 [1,2,4,8589934592,1,0] -230 hdfs://### HDFS PATH ### 3580 [1,2,4,8589934592,1,0] -230 hdfs://### HDFS PATH ### 4914 [1,2,4,8589934592,1,0] -233 hdfs://### HDFS PATH ### 3214 [1,2,4,8589934592,1,0] -233 hdfs://### HDFS PATH ### 5140 [1,2,4,8589934592,1,0] -235 hdfs://### HDFS PATH ### 4046 [1,2,4,8589934592,1,0] -237 hdfs://### HDFS PATH ### 4722 [1,2,4,8589934592,1,0] -237 hdfs://### HDFS PATH ### 574 [1,2,4,8589934592,1,0] -238 hdfs://### HDFS PATH ### 0 [1,2,4,8589934592,1,0] -238 hdfs://### HDFS PATH ### 2746 [1,2,4,8589934592,1,0] -239 hdfs://### HDFS PATH ### 1496 [1,2,4,8589934592,1,0] -239 hdfs://### HDFS PATH ### 3722 [1,2,4,8589934592,1,0] -24 hdfs://### HDFS PATH ### 1972 [1,2,4,8589934592,1,0] -24 hdfs://### HDFS PATH ### 4594 [1,2,4,8589934592,1,0] -241 hdfs://### HDFS PATH ### 1662 [1,2,4,8589934592,1,0] -242 hdfs://### HDFS PATH ### 2940 [1,2,4,8589934592,1,0] -242 hdfs://### HDFS PATH ### 3012 [1,2,4,8589934592,1,0] -244 hdfs://### HDFS PATH ### 3872 [1,2,4,8589934592,1,0] -247 hdfs://### HDFS PATH ### 718 [1,2,4,8589934592,1,0] -248 hdfs://### HDFS PATH ### 4758 [1,2,4,8589934592,1,0] -249 hdfs://### HDFS PATH ### 5034 [1,2,4,8589934592,1,0] -252 hdfs://### HDFS PATH ### 454 [1,2,4,8589934592,1,0] -255 hdfs://### HDFS PATH ### 4616 [1,2,4,8589934592,1,0] -255 hdfs://### HDFS PATH ### 68 [1,2,4,8589934592,1,0] -256 hdfs://### HDFS PATH ### 3770 [1,2,4,8589934592,1,0] -256 hdfs://### HDFS PATH ### 5272 [1,2,4,8589934592,1,0] -257 hdfs://### HDFS PATH ### 4208 [1,2,4,8589934592,1,0] -258 hdfs://### HDFS PATH ### 4292 [1,2,4,8589934592,1,0] -26 hdfs://### HDFS PATH ### 2226 [1,2,4,8589934592,1,0] -26 hdfs://### HDFS PATH ### 5284 [1,2,4,8589934592,1,0] -260 hdfs://### HDFS PATH ### 1764 [1,2,4,8589934592,1,0] -262 hdfs://### HDFS PATH ### 4326 [1,2,4,8589934592,1,0] -263 hdfs://### HDFS PATH ### 3782 [1,2,4,8589934592,1,0] -265 hdfs://### HDFS PATH ### 114 [1,2,4,8589934592,1,0] -265 hdfs://### HDFS PATH ### 5046 [1,2,4,8589934592,1,0] -266 hdfs://### HDFS PATH ### 814 [1,2,4,8589934592,1,0] -27 hdfs://### HDFS PATH ### 34 [1,2,4,8589934592,1,0] -272 hdfs://### HDFS PATH ### 1836 [1,2,4,8589934592,1,0] -272 hdfs://### HDFS PATH ### 2976 [1,2,4,8589934592,1,0] -273 hdfs://### HDFS PATH ### 162 [1,2,4,8589934592,1,0] -273 hdfs://### HDFS PATH ### 2868 [1,2,4,8589934592,1,0] -273 hdfs://### HDFS PATH ### 5524 [1,2,4,8589934592,1,0] -274 hdfs://### HDFS PATH ### 3698 [1,2,4,8589934592,1,0] -275 hdfs://### HDFS PATH ### 1638 [1,2,4,8589934592,1,0] -277 hdfs://### HDFS PATH ### 1260 [1,2,4,8589934592,1,0] -277 hdfs://### HDFS PATH ### 2856 [1,2,4,8589934592,1,0] -277 hdfs://### HDFS PATH ### 362 [1,2,4,8589934592,1,0] -277 hdfs://### HDFS PATH ### 4902 [1,2,4,8589934592,1,0] -278 hdfs://### HDFS PATH ### 1544 [1,2,4,8589934592,1,0] -278 hdfs://### HDFS PATH ### 80 [1,2,4,8589934592,1,0] -28 hdfs://### HDFS PATH ### 5616 [1,2,4,8589934592,1,0] -280 hdfs://### HDFS PATH ### 1226 [1,2,4,8589934592,1,0] -280 hdfs://### HDFS PATH ### 3992 [1,2,4,8589934592,1,0] -281 hdfs://### HDFS PATH ### 350 [1,2,4,8589934592,1,0] -281 hdfs://### HDFS PATH ### 5548 [1,2,4,8589934592,1,0] -282 hdfs://### HDFS PATH ### 2468 [1,2,4,8589934592,1,0] -282 hdfs://### HDFS PATH ### 2722 [1,2,4,8589934592,1,0] -283 hdfs://### HDFS PATH ### 4022 [1,2,4,8589934592,1,0] -284 hdfs://### 
HDFS PATH ### 1708 [1,2,4,8589934592,1,0] -285 hdfs://### HDFS PATH ### 5478 [1,2,4,8589934592,1,0] -286 hdfs://### HDFS PATH ### 1404 [1,2,4,8589934592,1,0] -287 hdfs://### HDFS PATH ### 490 [1,2,4,8589934592,1,0] -288 hdfs://### HDFS PATH ### 2422 [1,2,4,8589934592,1,0] -288 hdfs://### HDFS PATH ### 3840 [1,2,4,8589934592,1,0] -289 hdfs://### HDFS PATH ### 1568 [1,2,4,8589934592,1,0] -291 hdfs://### HDFS PATH ### 4582 [1,2,4,8589934592,1,0] -292 hdfs://### HDFS PATH ### 466 [1,2,4,8589934592,1,0] -296 hdfs://### HDFS PATH ### 3626 [1,2,4,8589934592,1,0] -298 hdfs://### HDFS PATH ### 2168 [1,2,4,8589934592,1,0] -298 hdfs://### HDFS PATH ### 4456 [1,2,4,8589934592,1,0] -298 hdfs://### HDFS PATH ### 5386 [1,2,4,8589934592,1,0] -30 hdfs://### HDFS PATH ### 3494 [1,2,4,8589934592,1,0] -302 hdfs://### HDFS PATH ### 1034 [1,2,4,8589934592,1,0] -305 hdfs://### HDFS PATH ### 4782 [1,2,4,8589934592,1,0] -306 hdfs://### HDFS PATH ### 2880 [1,2,4,8589934592,1,0] -307 hdfs://### HDFS PATH ### 2812 [1,2,4,8589934592,1,0] -307 hdfs://### HDFS PATH ### 5672 [1,2,4,8589934592,1,0] -308 hdfs://### HDFS PATH ### 2388 [1,2,4,8589934592,1,0] -309 hdfs://### HDFS PATH ### 2904 [1,2,4,8589934592,1,0] -309 hdfs://### HDFS PATH ### 790 [1,2,4,8589934592,1,0] -310 hdfs://### HDFS PATH ### 4962 [1,2,4,8589934592,1,0] -311 hdfs://### HDFS PATH ### 1000 [1,2,4,8589934592,1,0] -311 hdfs://### HDFS PATH ### 1626 [1,2,4,8589934592,1,0] -311 hdfs://### HDFS PATH ### 22 [1,2,4,8589934592,1,0] -315 hdfs://### HDFS PATH ### 5594 [1,2,4,8589934592,1,0] -316 hdfs://### HDFS PATH ### 1012 [1,2,4,8589934592,1,0] -316 hdfs://### HDFS PATH ### 2576 [1,2,4,8589934592,1,0] -316 hdfs://### HDFS PATH ### 3944 [1,2,4,8589934592,1,0] -317 hdfs://### HDFS PATH ### 3104 [1,2,4,8589934592,1,0] -317 hdfs://### HDFS PATH ### 4974 [1,2,4,8589934592,1,0] -318 hdfs://### HDFS PATH ### 1602 [1,2,4,8589934592,1,0] -318 hdfs://### HDFS PATH ### 2504 [1,2,4,8589934592,1,0] -318 hdfs://### HDFS PATH ### 2516 [1,2,4,8589934592,1,0] -321 hdfs://### HDFS PATH ### 3308 [1,2,4,8589934592,1,0] -321 hdfs://### HDFS PATH ### 4090 [1,2,4,8589934592,1,0] -322 hdfs://### HDFS PATH ### 2096 [1,2,4,8589934592,1,0] -322 hdfs://### HDFS PATH ### 3250 [1,2,4,8589934592,1,0] -323 hdfs://### HDFS PATH ### 4878 [1,2,4,8589934592,1,0] -325 hdfs://### HDFS PATH ### 4890 [1,2,4,8589934592,1,0] -325 hdfs://### HDFS PATH ### 862 [1,2,4,8589934592,1,0] -327 hdfs://### HDFS PATH ### 2248 [1,2,4,8589934592,1,0] -327 hdfs://### HDFS PATH ### 2928 [1,2,4,8589934592,1,0] -327 hdfs://### HDFS PATH ### 338 [1,2,4,8589934592,1,0] -33 hdfs://### HDFS PATH ### 3592 [1,2,4,8589934592,1,0] -331 hdfs://### HDFS PATH ### 2988 [1,2,4,8589934592,1,0] -331 hdfs://### HDFS PATH ### 4034 [1,2,4,8589934592,1,0] -332 hdfs://### HDFS PATH ### 1614 [1,2,4,8589934592,1,0] -333 hdfs://### HDFS PATH ### 1684 [1,2,4,8589934592,1,0] -333 hdfs://### HDFS PATH ### 4986 [1,2,4,8589934592,1,0] -335 hdfs://### HDFS PATH ### 4102 [1,2,4,8589934592,1,0] -336 hdfs://### HDFS PATH ### 3148 [1,2,4,8589934592,1,0] -338 hdfs://### HDFS PATH ### 526 [1,2,4,8589934592,1,0] -339 hdfs://### HDFS PATH ### 956 [1,2,4,8589934592,1,0] -34 hdfs://### HDFS PATH ### 3192 [1,2,4,8589934592,1,0] -341 hdfs://### HDFS PATH ### 5406 [1,2,4,8589934592,1,0] -342 hdfs://### HDFS PATH ### 3558 [1,2,4,8589934592,1,0] -342 hdfs://### HDFS PATH ### 838 [1,2,4,8589934592,1,0] -344 hdfs://### HDFS PATH ### 3674 [1,2,4,8589934592,1,0] -344 hdfs://### HDFS PATH ### 5560 [1,2,4,8589934592,1,0] -345 hdfs://### HDFS PATH ### 1082 
[1,2,4,8589934592,1,0] -348 hdfs://### HDFS PATH ### 1882 [1,2,4,8589934592,1,0] -348 hdfs://### HDFS PATH ### 1960 [1,2,4,8589934592,1,0] -348 hdfs://### HDFS PATH ### 4338 [1,2,4,8589934592,1,0] -348 hdfs://### HDFS PATH ### 5490 [1,2,4,8589934592,1,0] -348 hdfs://### HDFS PATH ### 5660 [1,2,4,8589934592,1,0] -35 hdfs://### HDFS PATH ### 1238 [1,2,4,8589934592,1,0] -35 hdfs://### HDFS PATH ### 3138 [1,2,4,8589934592,1,0] -35 hdfs://### HDFS PATH ### 4012 [1,2,4,8589934592,1,0] -351 hdfs://### HDFS PATH ### 4604 [1,2,4,8589934592,1,0] -353 hdfs://### HDFS PATH ### 1812 [1,2,4,8589934592,1,0] -353 hdfs://### HDFS PATH ### 5092 [1,2,4,8589934592,1,0] -356 hdfs://### HDFS PATH ### 1284 [1,2,4,8589934592,1,0] -360 hdfs://### HDFS PATH ### 4746 [1,2,4,8589934592,1,0] -362 hdfs://### HDFS PATH ### 5454 [1,2,4,8589934592,1,0] -364 hdfs://### HDFS PATH ### 2662 [1,2,4,8589934592,1,0] -365 hdfs://### HDFS PATH ### 802 [1,2,4,8589934592,1,0] -366 hdfs://### HDFS PATH ### 4138 [1,2,4,8589934592,1,0] -367 hdfs://### HDFS PATH ### 3662 [1,2,4,8589934592,1,0] -367 hdfs://### HDFS PATH ### 850 [1,2,4,8589934592,1,0] -368 hdfs://### HDFS PATH ### 3602 [1,2,4,8589934592,1,0] -369 hdfs://### HDFS PATH ### 186 [1,2,4,8589934592,1,0] -369 hdfs://### HDFS PATH ### 2564 [1,2,4,8589934592,1,0] -369 hdfs://### HDFS PATH ### 2952 [1,2,4,8589934592,1,0] -37 hdfs://### HDFS PATH ### 328 [1,2,4,8589934592,1,0] -37 hdfs://### HDFS PATH ### 5626 [1,2,4,8589934592,1,0] -373 hdfs://### HDFS PATH ### 1824 [1,2,4,8589934592,1,0] -374 hdfs://### HDFS PATH ### 268 [1,2,4,8589934592,1,0] -375 hdfs://### HDFS PATH ### 5212 [1,2,4,8589934592,1,0] -377 hdfs://### HDFS PATH ### 766 [1,2,4,8589934592,1,0] -378 hdfs://### HDFS PATH ### 1152 [1,2,4,8589934592,1,0] -379 hdfs://### HDFS PATH ### 5328 [1,2,4,8589934592,1,0] -382 hdfs://### HDFS PATH ### 1320 [1,2,4,8589934592,1,0] -382 hdfs://### HDFS PATH ### 4528 [1,2,4,8589934592,1,0] -384 hdfs://### HDFS PATH ### 1788 [1,2,4,8589934592,1,0] -384 hdfs://### HDFS PATH ### 5260 [1,2,4,8589934592,1,0] -384 hdfs://### HDFS PATH ### 5316 [1,2,4,8589934592,1,0] -386 hdfs://### HDFS PATH ### 1356 [1,2,4,8589934592,1,0] -389 hdfs://### HDFS PATH ### 2916 [1,2,4,8589934592,1,0] -392 hdfs://### HDFS PATH ### 2964 [1,2,4,8589934592,1,0] -393 hdfs://### HDFS PATH ### 2132 [1,2,4,8589934592,1,0] -394 hdfs://### HDFS PATH ### 562 [1,2,4,8589934592,1,0] -395 hdfs://### HDFS PATH ### 2710 [1,2,4,8589934592,1,0] -395 hdfs://### HDFS PATH ### 3116 [1,2,4,8589934592,1,0] -396 hdfs://### HDFS PATH ### 3092 [1,2,4,8589934592,1,0] -396 hdfs://### HDFS PATH ### 4372 [1,2,4,8589934592,1,0] -396 hdfs://### HDFS PATH ### 706 [1,2,4,8589934592,1,0] -397 hdfs://### HDFS PATH ### 4558 [1,2,4,8589934592,1,0] -397 hdfs://### HDFS PATH ### 778 [1,2,4,8589934592,1,0] -399 hdfs://### HDFS PATH ### 1296 [1,2,4,8589934592,1,0] -399 hdfs://### HDFS PATH ### 694 [1,2,4,8589934592,1,0] -4 hdfs://### HDFS PATH ### 1218 [1,2,4,8589934592,1,0] -400 hdfs://### HDFS PATH ### 5778 [1,2,4,8589934592,1,0] -401 hdfs://### HDFS PATH ### 138 [1,2,4,8589934592,1,0] -401 hdfs://### HDFS PATH ### 3000 [1,2,4,8589934592,1,0] -401 hdfs://### HDFS PATH ### 3828 [1,2,4,8589934592,1,0] -401 hdfs://### HDFS PATH ### 4268 [1,2,4,8589934592,1,0] -401 hdfs://### HDFS PATH ### 5224 [1,2,4,8589934592,1,0] -402 hdfs://### HDFS PATH ### 3080 [1,2,4,8589934592,1,0] -403 hdfs://### HDFS PATH ### 406 [1,2,4,8589934592,1,0] -403 hdfs://### HDFS PATH ### 4162 [1,2,4,8589934592,1,0] -403 hdfs://### HDFS PATH ### 5766 [1,2,4,8589934592,1,0] -404 
hdfs://### HDFS PATH ### 1776 [1,2,4,8589934592,1,0] -404 hdfs://### HDFS PATH ### 2318 [1,2,4,8589934592,1,0] -406 hdfs://### HDFS PATH ### 244 [1,2,4,8589934592,1,0] -406 hdfs://### HDFS PATH ### 4220 [1,2,4,8589934592,1,0] -406 hdfs://### HDFS PATH ### 4256 [1,2,4,8589934592,1,0] -406 hdfs://### HDFS PATH ### 5152 [1,2,4,8589934592,1,0] -407 hdfs://### HDFS PATH ### 5248 [1,2,4,8589934592,1,0] -409 hdfs://### HDFS PATH ### 2528 [1,2,4,8589934592,1,0] -409 hdfs://### HDFS PATH ### 4232 [1,2,4,8589934592,1,0] -409 hdfs://### HDFS PATH ### 56 [1,2,4,8589934592,1,0] -41 hdfs://### HDFS PATH ### 3388 [1,2,4,8589934592,1,0] -411 hdfs://### HDFS PATH ### 1924 [1,2,4,8589934592,1,0] -413 hdfs://### HDFS PATH ### 2600 [1,2,4,8589934592,1,0] -413 hdfs://### HDFS PATH ### 610 [1,2,4,8589934592,1,0] -414 hdfs://### HDFS PATH ### 4686 [1,2,4,8589934592,1,0] -414 hdfs://### HDFS PATH ### 5696 [1,2,4,8589934592,1,0] -417 hdfs://### HDFS PATH ### 430 [1,2,4,8589934592,1,0] -417 hdfs://### HDFS PATH ### 4794 [1,2,4,8589934592,1,0] -417 hdfs://### HDFS PATH ### 730 [1,2,4,8589934592,1,0] -418 hdfs://### HDFS PATH ### 2204 [1,2,4,8589934592,1,0] -419 hdfs://### HDFS PATH ### 2758 [1,2,4,8589934592,1,0] -42 hdfs://### HDFS PATH ### 2030 [1,2,4,8589934592,1,0] -42 hdfs://### HDFS PATH ### 3298 [1,2,4,8589934592,1,0] -421 hdfs://### HDFS PATH ### 5236 [1,2,4,8589934592,1,0] -424 hdfs://### HDFS PATH ### 4350 [1,2,4,8589934592,1,0] -424 hdfs://### HDFS PATH ### 4504 [1,2,4,8589934592,1,0] -427 hdfs://### HDFS PATH ### 1248 [1,2,4,8589934592,1,0] -429 hdfs://### HDFS PATH ### 256 [1,2,4,8589934592,1,0] -429 hdfs://### HDFS PATH ### 4842 [1,2,4,8589934592,1,0] -43 hdfs://### HDFS PATH ### 2330 [1,2,4,8589934592,1,0] -430 hdfs://### HDFS PATH ### 1532 [1,2,4,8589934592,1,0] -430 hdfs://### HDFS PATH ### 3320 [1,2,4,8589934592,1,0] -430 hdfs://### HDFS PATH ### 442 [1,2,4,8589934592,1,0] -431 hdfs://### HDFS PATH ### 1994 [1,2,4,8589934592,1,0] -431 hdfs://### HDFS PATH ### 4420 [1,2,4,8589934592,1,0] -431 hdfs://### HDFS PATH ### 4480 [1,2,4,8589934592,1,0] -432 hdfs://### HDFS PATH ### 3920 [1,2,4,8589934592,1,0] -435 hdfs://### HDFS PATH ### 2834 [1,2,4,8589934592,1,0] -436 hdfs://### HDFS PATH ### 2340 [1,2,4,8589934592,1,0] -437 hdfs://### HDFS PATH ### 1368 [1,2,4,8589934592,1,0] -438 hdfs://### HDFS PATH ### 1070 [1,2,4,8589934592,1,0] -438 hdfs://### HDFS PATH ### 3884 [1,2,4,8589934592,1,0] -438 hdfs://### HDFS PATH ### 4662 [1,2,4,8589934592,1,0] -439 hdfs://### HDFS PATH ### 4734 [1,2,4,8589934592,1,0] -439 hdfs://### HDFS PATH ### 826 [1,2,4,8589934592,1,0] -44 hdfs://### HDFS PATH ### 4068 [1,2,4,8589934592,1,0] -443 hdfs://### HDFS PATH ### 4866 [1,2,4,8589934592,1,0] -444 hdfs://### HDFS PATH ### 4818 [1,2,4,8589934592,1,0] -446 hdfs://### HDFS PATH ### 538 [1,2,4,8589934592,1,0] -448 hdfs://### HDFS PATH ### 5636 [1,2,4,8589934592,1,0] -449 hdfs://### HDFS PATH ### 3434 [1,2,4,8589934592,1,0] -452 hdfs://### HDFS PATH ### 3024 [1,2,4,8589934592,1,0] -453 hdfs://### HDFS PATH ### 3482 [1,2,4,8589934592,1,0] -454 hdfs://### HDFS PATH ### 2144 [1,2,4,8589934592,1,0] -454 hdfs://### HDFS PATH ### 4432 [1,2,4,8589934592,1,0] -454 hdfs://### HDFS PATH ### 5200 [1,2,4,8589934592,1,0] -455 hdfs://### HDFS PATH ### 976 [1,2,4,8589934592,1,0] -457 hdfs://### HDFS PATH ### 2446 [1,2,4,8589934592,1,0] -458 hdfs://### HDFS PATH ### 3356 [1,2,4,8589934592,1,0] -458 hdfs://### HDFS PATH ### 5442 [1,2,4,8589934592,1,0] -459 hdfs://### HDFS PATH ### 1450 [1,2,4,8589934592,1,0] -459 hdfs://### HDFS PATH ### 550 
[1,2,4,8589934592,1,0] -460 hdfs://### HDFS PATH ### 5010 [1,2,4,8589934592,1,0] -462 hdfs://### HDFS PATH ### 5128 [1,2,4,8589934592,1,0] -462 hdfs://### HDFS PATH ### 5350 [1,2,4,8589934592,1,0] -463 hdfs://### HDFS PATH ### 1982 [1,2,4,8589934592,1,0] -463 hdfs://### HDFS PATH ### 3980 [1,2,4,8589934592,1,0] -466 hdfs://### HDFS PATH ### 1894 [1,2,4,8589934592,1,0] -466 hdfs://### HDFS PATH ### 4126 [1,2,4,8589934592,1,0] -466 hdfs://### HDFS PATH ### 658 [1,2,4,8589934592,1,0] -467 hdfs://### HDFS PATH ### 3908 [1,2,4,8589934592,1,0] -468 hdfs://### HDFS PATH ### 2120 [1,2,4,8589934592,1,0] -468 hdfs://### HDFS PATH ### 2376 [1,2,4,8589934592,1,0] -468 hdfs://### HDFS PATH ### 3526 [1,2,4,8589934592,1,0] -468 hdfs://### HDFS PATH ### 4950 [1,2,4,8589934592,1,0] -469 hdfs://### HDFS PATH ### 1380 [1,2,4,8589934592,1,0] -469 hdfs://### HDFS PATH ### 2364 [1,2,4,8589934592,1,0] -469 hdfs://### HDFS PATH ### 292 [1,2,4,8589934592,1,0] -469 hdfs://### HDFS PATH ### 3968 [1,2,4,8589934592,1,0] -469 hdfs://### HDFS PATH ### 5582 [1,2,4,8589934592,1,0] -47 hdfs://### HDFS PATH ### 1198 [1,2,4,8589934592,1,0] -470 hdfs://### HDFS PATH ### 2540 [1,2,4,8589934592,1,0] -472 hdfs://### HDFS PATH ### 3238 [1,2,4,8589934592,1,0] -475 hdfs://### HDFS PATH ### 898 [1,2,4,8589934592,1,0] -477 hdfs://### HDFS PATH ### 5708 [1,2,4,8589934592,1,0] -478 hdfs://### HDFS PATH ### 4444 [1,2,4,8589934592,1,0] -478 hdfs://### HDFS PATH ### 4926 [1,2,4,8589934592,1,0] -479 hdfs://### HDFS PATH ### 4770 [1,2,4,8589934592,1,0] -480 hdfs://### HDFS PATH ### 3816 [1,2,4,8589934592,1,0] -480 hdfs://### HDFS PATH ### 4570 [1,2,4,8589934592,1,0] -480 hdfs://### HDFS PATH ### 5058 [1,2,4,8589934592,1,0] -481 hdfs://### HDFS PATH ### 2434 [1,2,4,8589934592,1,0] -482 hdfs://### HDFS PATH ### 586 [1,2,4,8589934592,1,0] -483 hdfs://### HDFS PATH ### 4174 [1,2,4,8589934592,1,0] -484 hdfs://### HDFS PATH ### 102 [1,2,4,8589934592,1,0] -485 hdfs://### HDFS PATH ### 3734 [1,2,4,8589934592,1,0] -487 hdfs://### HDFS PATH ### 3804 [1,2,4,8589934592,1,0] -489 hdfs://### HDFS PATH ### 1128 [1,2,4,8589934592,1,0] -489 hdfs://### HDFS PATH ### 1800 [1,2,4,8589934592,1,0] -489 hdfs://### HDFS PATH ### 3344 [1,2,4,8589934592,1,0] -489 hdfs://### HDFS PATH ### 742 [1,2,4,8589934592,1,0] -490 hdfs://### HDFS PATH ### 2640 [1,2,4,8589934592,1,0] -491 hdfs://### HDFS PATH ### 4710 [1,2,4,8589934592,1,0] -492 hdfs://### HDFS PATH ### 3410 [1,2,4,8589934592,1,0] -492 hdfs://### HDFS PATH ### 5362 [1,2,4,8589934592,1,0] -493 hdfs://### HDFS PATH ### 4998 [1,2,4,8589934592,1,0] -494 hdfs://### HDFS PATH ### 622 [1,2,4,8589934592,1,0] -495 hdfs://### HDFS PATH ### 316 [1,2,4,8589934592,1,0] -496 hdfs://### HDFS PATH ### 2076 [1,2,4,8589934592,1,0] -497 hdfs://### HDFS PATH ### 3068 [1,2,4,8589934592,1,0] -498 hdfs://### HDFS PATH ### 1332 [1,2,4,8589934592,1,0] -498 hdfs://### HDFS PATH ### 3262 [1,2,4,8589934592,1,0] -498 hdfs://### HDFS PATH ### 5418 [1,2,4,8589934592,1,0] -5 hdfs://### HDFS PATH ### 3060 [1,2,4,8589934592,1,0] -5 hdfs://### HDFS PATH ### 3864 [1,2,4,8589934592,1,0] -5 hdfs://### HDFS PATH ### 4540 [1,2,4,8589934592,1,0] -51 hdfs://### HDFS PATH ### 1462 [1,2,4,8589934592,1,0] -51 hdfs://### HDFS PATH ### 2308 [1,2,4,8589934592,1,0] -53 hdfs://### HDFS PATH ### 4186 [1,2,4,8589934592,1,0] -54 hdfs://### HDFS PATH ### 1440 [1,2,4,8589934592,1,0] -57 hdfs://### HDFS PATH ### 1024 [1,2,4,8589934592,1,0] -58 hdfs://### HDFS PATH ### 1906 [1,2,4,8589934592,1,0] -58 hdfs://### HDFS PATH ### 3128 [1,2,4,8589934592,1,0] -64 hdfs://### 
HDFS PATH ### 3516 [1,2,4,8589934592,1,0] -65 hdfs://### HDFS PATH ### 1592 [1,2,4,8589934592,1,0] -66 hdfs://### HDFS PATH ### 198 [1,2,4,8589934592,1,0] -67 hdfs://### HDFS PATH ### 1754 [1,2,4,8589934592,1,0] -67 hdfs://### HDFS PATH ### 5306 [1,2,4,8589934592,1,0] -69 hdfs://### HDFS PATH ### 3570 [1,2,4,8589934592,1,0] -70 hdfs://### HDFS PATH ### 3794 [1,2,4,8589934592,1,0] -70 hdfs://### HDFS PATH ### 4548 [1,2,4,8589934592,1,0] -70 hdfs://### HDFS PATH ### 4640 [1,2,4,8589934592,1,0] -72 hdfs://### HDFS PATH ### 1208 [1,2,4,8589934592,1,0] -72 hdfs://### HDFS PATH ### 2792 [1,2,4,8589934592,1,0] -74 hdfs://### HDFS PATH ### 3548 [1,2,4,8589934592,1,0] -76 hdfs://### HDFS PATH ### 3378 [1,2,4,8589934592,1,0] -76 hdfs://### HDFS PATH ### 3538 [1,2,4,8589934592,1,0] -77 hdfs://### HDFS PATH ### 2622 [1,2,4,8589934592,1,0] -78 hdfs://### HDFS PATH ### 3368 [1,2,4,8589934592,1,0] -8 hdfs://### HDFS PATH ### 1916 [1,2,4,8589934592,1,0] -80 hdfs://### HDFS PATH ### 4058 [1,2,4,8589934592,1,0] -82 hdfs://### HDFS PATH ### 396 [1,2,4,8589934592,1,0] -83 hdfs://### HDFS PATH ### 1674 [1,2,4,8589934592,1,0] -83 hdfs://### HDFS PATH ### 5070 [1,2,4,8589934592,1,0] -84 hdfs://### HDFS PATH ### 1872 [1,2,4,8589934592,1,0] -84 hdfs://### HDFS PATH ### 5606 [1,2,4,8589934592,1,0] -85 hdfs://### HDFS PATH ### 2612 [1,2,4,8589934592,1,0] -86 hdfs://### HDFS PATH ### 12 [1,2,4,8589934592,1,0] -87 hdfs://### HDFS PATH ### 2652 [1,2,4,8589934592,1,0] -9 hdfs://### HDFS PATH ### 5398 [1,2,4,8589934592,1,0] -90 hdfs://### HDFS PATH ### 2802 [1,2,4,8589934592,1,0] -90 hdfs://### HDFS PATH ### 4304 [1,2,4,8589934592,1,0] -90 hdfs://### HDFS PATH ### 5744 [1,2,4,8589934592,1,0] -92 hdfs://### HDFS PATH ### 1176 [1,2,4,8589934592,1,0] -95 hdfs://### HDFS PATH ### 2400 [1,2,4,8589934592,1,0] -95 hdfs://### HDFS PATH ### 3160 [1,2,4,8589934592,1,0] -96 hdfs://### HDFS PATH ### 2216 [1,2,4,8589934592,1,0] -97 hdfs://### HDFS PATH ### 5572 [1,2,4,8589934592,1,0] -97 hdfs://### HDFS PATH ### 5802 [1,2,4,8589934592,1,0] -98 hdfs://### HDFS PATH ### 2458 [1,2,4,8589934592,1,0] -98 hdfs://### HDFS PATH ### 92 [1,2,4,8589934592,1,0] -PREHOOK: query: SELECT * FROM default__src_src2_index__ -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src2_index__ -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT * FROM default__src_src2_index__ -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src2_index__ -POSTHOOK: Output: hdfs://### HDFS PATH ### -val_0 hdfs://### HDFS PATH ### 2088 [1,2,4,8589934592,1,0] -val_0 hdfs://### HDFS PATH ### 2632 [1,2,4,8589934592,1,0] -val_0 hdfs://### HDFS PATH ### 968 [1,2,4,8589934592,1,0] -val_10 hdfs://### HDFS PATH ### 2846 [1,2,4,8589934592,1,0] -val_100 hdfs://### HDFS PATH ### 2156 [1,2,4,8589934592,1,0] -val_100 hdfs://### HDFS PATH ### 5374 [1,2,4,8589934592,1,0] -val_103 hdfs://### HDFS PATH ### 1484 [1,2,4,8589934592,1,0] -val_103 hdfs://### HDFS PATH ### 3614 [1,2,4,8589934592,1,0] -val_104 hdfs://### HDFS PATH ### 4114 [1,2,4,8589934592,1,0] -val_104 hdfs://### HDFS PATH ### 4628 [1,2,4,8589934592,1,0] -val_105 hdfs://### HDFS PATH ### 4196 [1,2,4,8589934592,1,0] -val_11 hdfs://### HDFS PATH ### 3170 [1,2,4,8589934592,1,0] -val_111 hdfs://### HDFS PATH ### 1186 [1,2,4,8589934592,1,0] -val_113 hdfs://### HDFS PATH ### 3638 [1,2,4,8589934592,1,0] -val_113 hdfs://### HDFS PATH ### 920 [1,2,4,8589934592,1,0] -val_114 hdfs://### HDFS PATH ### 4280 [1,2,4,8589934592,1,0] -val_116 hdfs://### HDFS PATH ### 3746 [1,2,4,8589934592,1,0] -val_118 
hdfs://### HDFS PATH ### 2686 [1,2,4,8589934592,1,0] -val_118 hdfs://### HDFS PATH ### 2780 [1,2,4,8589934592,1,0] -val_119 hdfs://### HDFS PATH ### 2064 [1,2,4,8589934592,1,0] -val_119 hdfs://### HDFS PATH ### 3332 [1,2,4,8589934592,1,0] -val_119 hdfs://### HDFS PATH ### 4674 [1,2,4,8589934592,1,0] -val_12 hdfs://### HDFS PATH ### 1720 [1,2,4,8589934592,1,0] -val_12 hdfs://### HDFS PATH ### 4362 [1,2,4,8589934592,1,0] -val_120 hdfs://### HDFS PATH ### 2284 [1,2,4,8589934592,1,0] -val_120 hdfs://### HDFS PATH ### 4830 [1,2,4,8589934592,1,0] -val_125 hdfs://### HDFS PATH ### 1344 [1,2,4,8589934592,1,0] -val_125 hdfs://### HDFS PATH ### 4468 [1,2,4,8589934592,1,0] -val_126 hdfs://### HDFS PATH ### 5732 [1,2,4,8589934592,1,0] -val_128 hdfs://### HDFS PATH ### 208 [1,2,4,8589934592,1,0] -val_128 hdfs://### HDFS PATH ### 3896 [1,2,4,8589934592,1,0] -val_128 hdfs://### HDFS PATH ### 988 [1,2,4,8589934592,1,0] -val_129 hdfs://### HDFS PATH ### 1094 [1,2,4,8589934592,1,0] -val_129 hdfs://### HDFS PATH ### 2040 [1,2,4,8589934592,1,0] -val_131 hdfs://### HDFS PATH ### 2296 [1,2,4,8589934592,1,0] -val_133 hdfs://### HDFS PATH ### 5164 [1,2,4,8589934592,1,0] -val_134 hdfs://### HDFS PATH ### 2698 [1,2,4,8589934592,1,0] -val_134 hdfs://### HDFS PATH ### 5294 [1,2,4,8589934592,1,0] -val_136 hdfs://### HDFS PATH ### 5080 [1,2,4,8589934592,1,0] -val_137 hdfs://### HDFS PATH ### 1650 [1,2,4,8589934592,1,0] -val_137 hdfs://### HDFS PATH ### 2552 [1,2,4,8589934592,1,0] -val_138 hdfs://### HDFS PATH ### 1472 [1,2,4,8589934592,1,0] -val_138 hdfs://### HDFS PATH ### 1848 [1,2,4,8589934592,1,0] -val_138 hdfs://### HDFS PATH ### 2734 [1,2,4,8589934592,1,0] -val_138 hdfs://### HDFS PATH ### 3470 [1,2,4,8589934592,1,0] -val_143 hdfs://### HDFS PATH ### 3226 [1,2,4,8589934592,1,0] -val_145 hdfs://### HDFS PATH ### 304 [1,2,4,8589934592,1,0] -val_146 hdfs://### HDFS PATH ### 232 [1,2,4,8589934592,1,0] -val_146 hdfs://### HDFS PATH ### 5430 [1,2,4,8589934592,1,0] -val_149 hdfs://### HDFS PATH ### 1058 [1,2,4,8589934592,1,0] -val_149 hdfs://### HDFS PATH ### 3422 [1,2,4,8589934592,1,0] -val_15 hdfs://### HDFS PATH ### 2770 [1,2,4,8589934592,1,0] -val_15 hdfs://### HDFS PATH ### 386 [1,2,4,8589934592,1,0] -val_150 hdfs://### HDFS PATH ### 150 [1,2,4,8589934592,1,0] -val_152 hdfs://### HDFS PATH ### 280 [1,2,4,8589934592,1,0] -val_152 hdfs://### HDFS PATH ### 5648 [1,2,4,8589934592,1,0] -val_153 hdfs://### HDFS PATH ### 502 [1,2,4,8589934592,1,0] -val_155 hdfs://### HDFS PATH ### 932 [1,2,4,8589934592,1,0] -val_156 hdfs://### HDFS PATH ### 2352 [1,2,4,8589934592,1,0] -val_157 hdfs://### HDFS PATH ### 1140 [1,2,4,8589934592,1,0] -val_158 hdfs://### HDFS PATH ### 2052 [1,2,4,8589934592,1,0] -val_160 hdfs://### HDFS PATH ### 3274 [1,2,4,8589934592,1,0] -val_162 hdfs://### HDFS PATH ### 754 [1,2,4,8589934592,1,0] -val_163 hdfs://### HDFS PATH ### 4650 [1,2,4,8589934592,1,0] -val_164 hdfs://### HDFS PATH ### 4408 [1,2,4,8589934592,1,0] -val_164 hdfs://### HDFS PATH ### 4492 [1,2,4,8589934592,1,0] -val_165 hdfs://### HDFS PATH ### 2236 [1,2,4,8589934592,1,0] -val_165 hdfs://### HDFS PATH ### 44 [1,2,4,8589934592,1,0] -val_166 hdfs://### HDFS PATH ### 418 [1,2,4,8589934592,1,0] -val_167 hdfs://### HDFS PATH ### 3686 [1,2,4,8589934592,1,0] -val_167 hdfs://### HDFS PATH ### 5502 [1,2,4,8589934592,1,0] -val_167 hdfs://### HDFS PATH ### 874 [1,2,4,8589934592,1,0] -val_168 hdfs://### HDFS PATH ### 3180 [1,2,4,8589934592,1,0] -val_169 hdfs://### HDFS PATH ### 1308 [1,2,4,8589934592,1,0] -val_169 hdfs://### HDFS PATH ### 2588 
[1,2,4,8589934592,1,0] -val_169 hdfs://### HDFS PATH ### 4854 [1,2,4,8589934592,1,0] -val_169 hdfs://### HDFS PATH ### 5754 [1,2,4,8589934592,1,0] -val_17 hdfs://### HDFS PATH ### 910 [1,2,4,8589934592,1,0] -val_170 hdfs://### HDFS PATH ### 1106 [1,2,4,8589934592,1,0] -val_172 hdfs://### HDFS PATH ### 2018 [1,2,4,8589934592,1,0] -val_172 hdfs://### HDFS PATH ### 5104 [1,2,4,8589934592,1,0] -val_174 hdfs://### HDFS PATH ### 598 [1,2,4,8589934592,1,0] -val_174 hdfs://### HDFS PATH ### 682 [1,2,4,8589934592,1,0] -val_175 hdfs://### HDFS PATH ### 4150 [1,2,4,8589934592,1,0] -val_175 hdfs://### HDFS PATH ### 5176 [1,2,4,8589934592,1,0] -val_176 hdfs://### HDFS PATH ### 1428 [1,2,4,8589934592,1,0] -val_176 hdfs://### HDFS PATH ### 1556 [1,2,4,8589934592,1,0] -val_177 hdfs://### HDFS PATH ### 3036 [1,2,4,8589934592,1,0] -val_178 hdfs://### HDFS PATH ### 4938 [1,2,4,8589934592,1,0] -val_179 hdfs://### HDFS PATH ### 2006 [1,2,4,8589934592,1,0] -val_179 hdfs://### HDFS PATH ### 2674 [1,2,4,8589934592,1,0] -val_18 hdfs://### HDFS PATH ### 5340 [1,2,4,8589934592,1,0] -val_18 hdfs://### HDFS PATH ### 5514 [1,2,4,8589934592,1,0] -val_180 hdfs://### HDFS PATH ### 1696 [1,2,4,8589934592,1,0] -val_181 hdfs://### HDFS PATH ### 1742 [1,2,4,8589934592,1,0] -val_183 hdfs://### HDFS PATH ### 5536 [1,2,4,8589934592,1,0] -val_186 hdfs://### HDFS PATH ### 5466 [1,2,4,8589934592,1,0] -val_187 hdfs://### HDFS PATH ### 1416 [1,2,4,8589934592,1,0] -val_187 hdfs://### HDFS PATH ### 2492 [1,2,4,8589934592,1,0] -val_187 hdfs://### HDFS PATH ### 4516 [1,2,4,8589934592,1,0] -val_189 hdfs://### HDFS PATH ### 5188 [1,2,4,8589934592,1,0] -val_19 hdfs://### HDFS PATH ### 2824 [1,2,4,8589934592,1,0] -val_190 hdfs://### HDFS PATH ### 4244 [1,2,4,8589934592,1,0] -val_191 hdfs://### HDFS PATH ### 2192 [1,2,4,8589934592,1,0] -val_191 hdfs://### HDFS PATH ### 3852 [1,2,4,8589934592,1,0] -val_192 hdfs://### HDFS PATH ### 1392 [1,2,4,8589934592,1,0] -val_193 hdfs://### HDFS PATH ### 126 [1,2,4,8589934592,1,0] -val_193 hdfs://### HDFS PATH ### 4078 [1,2,4,8589934592,1,0] -val_193 hdfs://### HDFS PATH ### 514 [1,2,4,8589934592,1,0] -val_194 hdfs://### HDFS PATH ### 5684 [1,2,4,8589934592,1,0] -val_195 hdfs://### HDFS PATH ### 3286 [1,2,4,8589934592,1,0] -val_195 hdfs://### HDFS PATH ### 886 [1,2,4,8589934592,1,0] -val_196 hdfs://### HDFS PATH ### 2410 [1,2,4,8589934592,1,0] -val_197 hdfs://### HDFS PATH ### 2108 [1,2,4,8589934592,1,0] -val_197 hdfs://### HDFS PATH ### 2480 [1,2,4,8589934592,1,0] -val_199 hdfs://### HDFS PATH ### 2180 [1,2,4,8589934592,1,0] -val_199 hdfs://### HDFS PATH ### 4806 [1,2,4,8589934592,1,0] -val_199 hdfs://### HDFS PATH ### 646 [1,2,4,8589934592,1,0] -val_2 hdfs://### HDFS PATH ### 4004 [1,2,4,8589934592,1,0] -val_20 hdfs://### HDFS PATH ### 1118 [1,2,4,8589934592,1,0] -val_200 hdfs://### HDFS PATH ### 4698 [1,2,4,8589934592,1,0] -val_200 hdfs://### HDFS PATH ### 5790 [1,2,4,8589934592,1,0] -val_201 hdfs://### HDFS PATH ### 4384 [1,2,4,8589934592,1,0] -val_202 hdfs://### HDFS PATH ### 3932 [1,2,4,8589934592,1,0] -val_203 hdfs://### HDFS PATH ### 4314 [1,2,4,8589934592,1,0] -val_203 hdfs://### HDFS PATH ### 944 [1,2,4,8589934592,1,0] -val_205 hdfs://### HDFS PATH ### 1046 [1,2,4,8589934592,1,0] -val_205 hdfs://### HDFS PATH ### 2272 [1,2,4,8589934592,1,0] -val_207 hdfs://### HDFS PATH ### 5022 [1,2,4,8589934592,1,0] -val_207 hdfs://### HDFS PATH ### 634 [1,2,4,8589934592,1,0] -val_208 hdfs://### HDFS PATH ### 1272 [1,2,4,8589934592,1,0] -val_208 hdfs://### HDFS PATH ### 1948 [1,2,4,8589934592,1,0] -val_208 
hdfs://### HDFS PATH ### 670 [1,2,4,8589934592,1,0] -val_209 hdfs://### HDFS PATH ### 3504 [1,2,4,8589934592,1,0] -val_209 hdfs://### HDFS PATH ### 374 [1,2,4,8589934592,1,0] -val_213 hdfs://### HDFS PATH ### 1508 [1,2,4,8589934592,1,0] -val_213 hdfs://### HDFS PATH ### 220 [1,2,4,8589934592,1,0] -val_214 hdfs://### HDFS PATH ### 5116 [1,2,4,8589934592,1,0] -val_216 hdfs://### HDFS PATH ### 1520 [1,2,4,8589934592,1,0] -val_216 hdfs://### HDFS PATH ### 3650 [1,2,4,8589934592,1,0] -val_217 hdfs://### HDFS PATH ### 1860 [1,2,4,8589934592,1,0] -val_217 hdfs://### HDFS PATH ### 4396 [1,2,4,8589934592,1,0] -val_218 hdfs://### HDFS PATH ### 3446 [1,2,4,8589934592,1,0] -val_219 hdfs://### HDFS PATH ### 3710 [1,2,4,8589934592,1,0] -val_219 hdfs://### HDFS PATH ### 478 [1,2,4,8589934592,1,0] -val_221 hdfs://### HDFS PATH ### 1164 [1,2,4,8589934592,1,0] -val_221 hdfs://### HDFS PATH ### 1580 [1,2,4,8589934592,1,0] -val_222 hdfs://### HDFS PATH ### 5720 [1,2,4,8589934592,1,0] -val_223 hdfs://### HDFS PATH ### 3398 [1,2,4,8589934592,1,0] -val_223 hdfs://### HDFS PATH ### 3758 [1,2,4,8589934592,1,0] -val_224 hdfs://### HDFS PATH ### 174 [1,2,4,8589934592,1,0] -val_224 hdfs://### HDFS PATH ### 2892 [1,2,4,8589934592,1,0] -val_226 hdfs://### HDFS PATH ### 3048 [1,2,4,8589934592,1,0] -val_228 hdfs://### HDFS PATH ### 3458 [1,2,4,8589934592,1,0] -val_229 hdfs://### HDFS PATH ### 3202 [1,2,4,8589934592,1,0] -val_229 hdfs://### HDFS PATH ### 3956 [1,2,4,8589934592,1,0] -val_230 hdfs://### HDFS PATH ### 1730 [1,2,4,8589934592,1,0] -val_230 hdfs://### HDFS PATH ### 1936 [1,2,4,8589934592,1,0] -val_230 hdfs://### HDFS PATH ### 2260 [1,2,4,8589934592,1,0] -val_230 hdfs://### HDFS PATH ### 3580 [1,2,4,8589934592,1,0] -val_230 hdfs://### HDFS PATH ### 4914 [1,2,4,8589934592,1,0] -val_233 hdfs://### HDFS PATH ### 3214 [1,2,4,8589934592,1,0] -val_233 hdfs://### HDFS PATH ### 5140 [1,2,4,8589934592,1,0] -val_235 hdfs://### HDFS PATH ### 4046 [1,2,4,8589934592,1,0] -val_237 hdfs://### HDFS PATH ### 4722 [1,2,4,8589934592,1,0] -val_237 hdfs://### HDFS PATH ### 574 [1,2,4,8589934592,1,0] -val_238 hdfs://### HDFS PATH ### 0 [1,2,4,8589934592,1,0] -val_238 hdfs://### HDFS PATH ### 2746 [1,2,4,8589934592,1,0] -val_239 hdfs://### HDFS PATH ### 1496 [1,2,4,8589934592,1,0] -val_239 hdfs://### HDFS PATH ### 3722 [1,2,4,8589934592,1,0] -val_24 hdfs://### HDFS PATH ### 1972 [1,2,4,8589934592,1,0] -val_24 hdfs://### HDFS PATH ### 4594 [1,2,4,8589934592,1,0] -val_241 hdfs://### HDFS PATH ### 1662 [1,2,4,8589934592,1,0] -val_242 hdfs://### HDFS PATH ### 2940 [1,2,4,8589934592,1,0] -val_242 hdfs://### HDFS PATH ### 3012 [1,2,4,8589934592,1,0] -val_244 hdfs://### HDFS PATH ### 3872 [1,2,4,8589934592,1,0] -val_247 hdfs://### HDFS PATH ### 718 [1,2,4,8589934592,1,0] -val_248 hdfs://### HDFS PATH ### 4758 [1,2,4,8589934592,1,0] -val_249 hdfs://### HDFS PATH ### 5034 [1,2,4,8589934592,1,0] -val_252 hdfs://### HDFS PATH ### 454 [1,2,4,8589934592,1,0] -val_255 hdfs://### HDFS PATH ### 4616 [1,2,4,8589934592,1,0] -val_255 hdfs://### HDFS PATH ### 68 [1,2,4,8589934592,1,0] -val_256 hdfs://### HDFS PATH ### 3770 [1,2,4,8589934592,1,0] -val_256 hdfs://### HDFS PATH ### 5272 [1,2,4,8589934592,1,0] -val_257 hdfs://### HDFS PATH ### 4208 [1,2,4,8589934592,1,0] -val_258 hdfs://### HDFS PATH ### 4292 [1,2,4,8589934592,1,0] -val_26 hdfs://### HDFS PATH ### 2226 [1,2,4,8589934592,1,0] -val_26 hdfs://### HDFS PATH ### 5284 [1,2,4,8589934592,1,0] -val_260 hdfs://### HDFS PATH ### 1764 [1,2,4,8589934592,1,0] -val_262 hdfs://### HDFS PATH ### 4326 
[1,2,4,8589934592,1,0] -val_263 hdfs://### HDFS PATH ### 3782 [1,2,4,8589934592,1,0] -val_265 hdfs://### HDFS PATH ### 114 [1,2,4,8589934592,1,0] -val_265 hdfs://### HDFS PATH ### 5046 [1,2,4,8589934592,1,0] -val_266 hdfs://### HDFS PATH ### 814 [1,2,4,8589934592,1,0] -val_27 hdfs://### HDFS PATH ### 34 [1,2,4,8589934592,1,0] -val_272 hdfs://### HDFS PATH ### 1836 [1,2,4,8589934592,1,0] -val_272 hdfs://### HDFS PATH ### 2976 [1,2,4,8589934592,1,0] -val_273 hdfs://### HDFS PATH ### 162 [1,2,4,8589934592,1,0] -val_273 hdfs://### HDFS PATH ### 2868 [1,2,4,8589934592,1,0] -val_273 hdfs://### HDFS PATH ### 5524 [1,2,4,8589934592,1,0] -val_274 hdfs://### HDFS PATH ### 3698 [1,2,4,8589934592,1,0] -val_275 hdfs://### HDFS PATH ### 1638 [1,2,4,8589934592,1,0] -val_277 hdfs://### HDFS PATH ### 1260 [1,2,4,8589934592,1,0] -val_277 hdfs://### HDFS PATH ### 2856 [1,2,4,8589934592,1,0] -val_277 hdfs://### HDFS PATH ### 362 [1,2,4,8589934592,1,0] -val_277 hdfs://### HDFS PATH ### 4902 [1,2,4,8589934592,1,0] -val_278 hdfs://### HDFS PATH ### 1544 [1,2,4,8589934592,1,0] -val_278 hdfs://### HDFS PATH ### 80 [1,2,4,8589934592,1,0] -val_28 hdfs://### HDFS PATH ### 5616 [1,2,4,8589934592,1,0] -val_280 hdfs://### HDFS PATH ### 1226 [1,2,4,8589934592,1,0] -val_280 hdfs://### HDFS PATH ### 3992 [1,2,4,8589934592,1,0] -val_281 hdfs://### HDFS PATH ### 350 [1,2,4,8589934592,1,0] -val_281 hdfs://### HDFS PATH ### 5548 [1,2,4,8589934592,1,0] -val_282 hdfs://### HDFS PATH ### 2468 [1,2,4,8589934592,1,0] -val_282 hdfs://### HDFS PATH ### 2722 [1,2,4,8589934592,1,0] -val_283 hdfs://### HDFS PATH ### 4022 [1,2,4,8589934592,1,0] -val_284 hdfs://### HDFS PATH ### 1708 [1,2,4,8589934592,1,0] -val_285 hdfs://### HDFS PATH ### 5478 [1,2,4,8589934592,1,0] -val_286 hdfs://### HDFS PATH ### 1404 [1,2,4,8589934592,1,0] -val_287 hdfs://### HDFS PATH ### 490 [1,2,4,8589934592,1,0] -val_288 hdfs://### HDFS PATH ### 2422 [1,2,4,8589934592,1,0] -val_288 hdfs://### HDFS PATH ### 3840 [1,2,4,8589934592,1,0] -val_289 hdfs://### HDFS PATH ### 1568 [1,2,4,8589934592,1,0] -val_291 hdfs://### HDFS PATH ### 4582 [1,2,4,8589934592,1,0] -val_292 hdfs://### HDFS PATH ### 466 [1,2,4,8589934592,1,0] -val_296 hdfs://### HDFS PATH ### 3626 [1,2,4,8589934592,1,0] -val_298 hdfs://### HDFS PATH ### 2168 [1,2,4,8589934592,1,0] -val_298 hdfs://### HDFS PATH ### 4456 [1,2,4,8589934592,1,0] -val_298 hdfs://### HDFS PATH ### 5386 [1,2,4,8589934592,1,0] -val_30 hdfs://### HDFS PATH ### 3494 [1,2,4,8589934592,1,0] -val_302 hdfs://### HDFS PATH ### 1034 [1,2,4,8589934592,1,0] -val_305 hdfs://### HDFS PATH ### 4782 [1,2,4,8589934592,1,0] -val_306 hdfs://### HDFS PATH ### 2880 [1,2,4,8589934592,1,0] -val_307 hdfs://### HDFS PATH ### 2812 [1,2,4,8589934592,1,0] -val_307 hdfs://### HDFS PATH ### 5672 [1,2,4,8589934592,1,0] -val_308 hdfs://### HDFS PATH ### 2388 [1,2,4,8589934592,1,0] -val_309 hdfs://### HDFS PATH ### 2904 [1,2,4,8589934592,1,0] -val_309 hdfs://### HDFS PATH ### 790 [1,2,4,8589934592,1,0] -val_310 hdfs://### HDFS PATH ### 4962 [1,2,4,8589934592,1,0] -val_311 hdfs://### HDFS PATH ### 1000 [1,2,4,8589934592,1,0] -val_311 hdfs://### HDFS PATH ### 1626 [1,2,4,8589934592,1,0] -val_311 hdfs://### HDFS PATH ### 22 [1,2,4,8589934592,1,0] -val_315 hdfs://### HDFS PATH ### 5594 [1,2,4,8589934592,1,0] -val_316 hdfs://### HDFS PATH ### 1012 [1,2,4,8589934592,1,0] -val_316 hdfs://### HDFS PATH ### 2576 [1,2,4,8589934592,1,0] -val_316 hdfs://### HDFS PATH ### 3944 [1,2,4,8589934592,1,0] -val_317 hdfs://### HDFS PATH ### 3104 [1,2,4,8589934592,1,0] -val_317 
hdfs://### HDFS PATH ### 4974 [1,2,4,8589934592,1,0] -val_318 hdfs://### HDFS PATH ### 1602 [1,2,4,8589934592,1,0] -val_318 hdfs://### HDFS PATH ### 2504 [1,2,4,8589934592,1,0] -val_318 hdfs://### HDFS PATH ### 2516 [1,2,4,8589934592,1,0] -val_321 hdfs://### HDFS PATH ### 3308 [1,2,4,8589934592,1,0] -val_321 hdfs://### HDFS PATH ### 4090 [1,2,4,8589934592,1,0] -val_322 hdfs://### HDFS PATH ### 2096 [1,2,4,8589934592,1,0] -val_322 hdfs://### HDFS PATH ### 3250 [1,2,4,8589934592,1,0] -val_323 hdfs://### HDFS PATH ### 4878 [1,2,4,8589934592,1,0] -val_325 hdfs://### HDFS PATH ### 4890 [1,2,4,8589934592,1,0] -val_325 hdfs://### HDFS PATH ### 862 [1,2,4,8589934592,1,0] -val_327 hdfs://### HDFS PATH ### 2248 [1,2,4,8589934592,1,0] -val_327 hdfs://### HDFS PATH ### 2928 [1,2,4,8589934592,1,0] -val_327 hdfs://### HDFS PATH ### 338 [1,2,4,8589934592,1,0] -val_33 hdfs://### HDFS PATH ### 3592 [1,2,4,8589934592,1,0] -val_331 hdfs://### HDFS PATH ### 2988 [1,2,4,8589934592,1,0] -val_331 hdfs://### HDFS PATH ### 4034 [1,2,4,8589934592,1,0] -val_332 hdfs://### HDFS PATH ### 1614 [1,2,4,8589934592,1,0] -val_333 hdfs://### HDFS PATH ### 1684 [1,2,4,8589934592,1,0] -val_333 hdfs://### HDFS PATH ### 4986 [1,2,4,8589934592,1,0] -val_335 hdfs://### HDFS PATH ### 4102 [1,2,4,8589934592,1,0] -val_336 hdfs://### HDFS PATH ### 3148 [1,2,4,8589934592,1,0] -val_338 hdfs://### HDFS PATH ### 526 [1,2,4,8589934592,1,0] -val_339 hdfs://### HDFS PATH ### 956 [1,2,4,8589934592,1,0] -val_34 hdfs://### HDFS PATH ### 3192 [1,2,4,8589934592,1,0] -val_341 hdfs://### HDFS PATH ### 5406 [1,2,4,8589934592,1,0] -val_342 hdfs://### HDFS PATH ### 3558 [1,2,4,8589934592,1,0] -val_342 hdfs://### HDFS PATH ### 838 [1,2,4,8589934592,1,0] -val_344 hdfs://### HDFS PATH ### 3674 [1,2,4,8589934592,1,0] -val_344 hdfs://### HDFS PATH ### 5560 [1,2,4,8589934592,1,0] -val_345 hdfs://### HDFS PATH ### 1082 [1,2,4,8589934592,1,0] -val_348 hdfs://### HDFS PATH ### 1882 [1,2,4,8589934592,1,0] -val_348 hdfs://### HDFS PATH ### 1960 [1,2,4,8589934592,1,0] -val_348 hdfs://### HDFS PATH ### 4338 [1,2,4,8589934592,1,0] -val_348 hdfs://### HDFS PATH ### 5490 [1,2,4,8589934592,1,0] -val_348 hdfs://### HDFS PATH ### 5660 [1,2,4,8589934592,1,0] -val_35 hdfs://### HDFS PATH ### 1238 [1,2,4,8589934592,1,0] -val_35 hdfs://### HDFS PATH ### 3138 [1,2,4,8589934592,1,0] -val_35 hdfs://### HDFS PATH ### 4012 [1,2,4,8589934592,1,0] -val_351 hdfs://### HDFS PATH ### 4604 [1,2,4,8589934592,1,0] -val_353 hdfs://### HDFS PATH ### 1812 [1,2,4,8589934592,1,0] -val_353 hdfs://### HDFS PATH ### 5092 [1,2,4,8589934592,1,0] -val_356 hdfs://### HDFS PATH ### 1284 [1,2,4,8589934592,1,0] -val_360 hdfs://### HDFS PATH ### 4746 [1,2,4,8589934592,1,0] -val_362 hdfs://### HDFS PATH ### 5454 [1,2,4,8589934592,1,0] -val_364 hdfs://### HDFS PATH ### 2662 [1,2,4,8589934592,1,0] -val_365 hdfs://### HDFS PATH ### 802 [1,2,4,8589934592,1,0] -val_366 hdfs://### HDFS PATH ### 4138 [1,2,4,8589934592,1,0] -val_367 hdfs://### HDFS PATH ### 3662 [1,2,4,8589934592,1,0] -val_367 hdfs://### HDFS PATH ### 850 [1,2,4,8589934592,1,0] -val_368 hdfs://### HDFS PATH ### 3602 [1,2,4,8589934592,1,0] -val_369 hdfs://### HDFS PATH ### 186 [1,2,4,8589934592,1,0] -val_369 hdfs://### HDFS PATH ### 2564 [1,2,4,8589934592,1,0] -val_369 hdfs://### HDFS PATH ### 2952 [1,2,4,8589934592,1,0] -val_37 hdfs://### HDFS PATH ### 328 [1,2,4,8589934592,1,0] -val_37 hdfs://### HDFS PATH ### 5626 [1,2,4,8589934592,1,0] -val_373 hdfs://### HDFS PATH ### 1824 [1,2,4,8589934592,1,0] -val_374 hdfs://### HDFS PATH ### 268 
[1,2,4,8589934592,1,0] -val_375 hdfs://### HDFS PATH ### 5212 [1,2,4,8589934592,1,0] -val_377 hdfs://### HDFS PATH ### 766 [1,2,4,8589934592,1,0] -val_378 hdfs://### HDFS PATH ### 1152 [1,2,4,8589934592,1,0] -val_379 hdfs://### HDFS PATH ### 5328 [1,2,4,8589934592,1,0] -val_382 hdfs://### HDFS PATH ### 1320 [1,2,4,8589934592,1,0] -val_382 hdfs://### HDFS PATH ### 4528 [1,2,4,8589934592,1,0] -val_384 hdfs://### HDFS PATH ### 1788 [1,2,4,8589934592,1,0] -val_384 hdfs://### HDFS PATH ### 5260 [1,2,4,8589934592,1,0] -val_384 hdfs://### HDFS PATH ### 5316 [1,2,4,8589934592,1,0] -val_386 hdfs://### HDFS PATH ### 1356 [1,2,4,8589934592,1,0] -val_389 hdfs://### HDFS PATH ### 2916 [1,2,4,8589934592,1,0] -val_392 hdfs://### HDFS PATH ### 2964 [1,2,4,8589934592,1,0] -val_393 hdfs://### HDFS PATH ### 2132 [1,2,4,8589934592,1,0] -val_394 hdfs://### HDFS PATH ### 562 [1,2,4,8589934592,1,0] -val_395 hdfs://### HDFS PATH ### 2710 [1,2,4,8589934592,1,0] -val_395 hdfs://### HDFS PATH ### 3116 [1,2,4,8589934592,1,0] -val_396 hdfs://### HDFS PATH ### 3092 [1,2,4,8589934592,1,0] -val_396 hdfs://### HDFS PATH ### 4372 [1,2,4,8589934592,1,0] -val_396 hdfs://### HDFS PATH ### 706 [1,2,4,8589934592,1,0] -val_397 hdfs://### HDFS PATH ### 4558 [1,2,4,8589934592,1,0] -val_397 hdfs://### HDFS PATH ### 778 [1,2,4,8589934592,1,0] -val_399 hdfs://### HDFS PATH ### 1296 [1,2,4,8589934592,1,0] -val_399 hdfs://### HDFS PATH ### 694 [1,2,4,8589934592,1,0] -val_4 hdfs://### HDFS PATH ### 1218 [1,2,4,8589934592,1,0] -val_400 hdfs://### HDFS PATH ### 5778 [1,2,4,8589934592,1,0] -val_401 hdfs://### HDFS PATH ### 138 [1,2,4,8589934592,1,0] -val_401 hdfs://### HDFS PATH ### 3000 [1,2,4,8589934592,1,0] -val_401 hdfs://### HDFS PATH ### 3828 [1,2,4,8589934592,1,0] -val_401 hdfs://### HDFS PATH ### 4268 [1,2,4,8589934592,1,0] -val_401 hdfs://### HDFS PATH ### 5224 [1,2,4,8589934592,1,0] -val_402 hdfs://### HDFS PATH ### 3080 [1,2,4,8589934592,1,0] -val_403 hdfs://### HDFS PATH ### 406 [1,2,4,8589934592,1,0] -val_403 hdfs://### HDFS PATH ### 4162 [1,2,4,8589934592,1,0] -val_403 hdfs://### HDFS PATH ### 5766 [1,2,4,8589934592,1,0] -val_404 hdfs://### HDFS PATH ### 1776 [1,2,4,8589934592,1,0] -val_404 hdfs://### HDFS PATH ### 2318 [1,2,4,8589934592,1,0] -val_406 hdfs://### HDFS PATH ### 244 [1,2,4,8589934592,1,0] -val_406 hdfs://### HDFS PATH ### 4220 [1,2,4,8589934592,1,0] -val_406 hdfs://### HDFS PATH ### 4256 [1,2,4,8589934592,1,0] -val_406 hdfs://### HDFS PATH ### 5152 [1,2,4,8589934592,1,0] -val_407 hdfs://### HDFS PATH ### 5248 [1,2,4,8589934592,1,0] -val_409 hdfs://### HDFS PATH ### 2528 [1,2,4,8589934592,1,0] -val_409 hdfs://### HDFS PATH ### 4232 [1,2,4,8589934592,1,0] -val_409 hdfs://### HDFS PATH ### 56 [1,2,4,8589934592,1,0] -val_41 hdfs://### HDFS PATH ### 3388 [1,2,4,8589934592,1,0] -val_411 hdfs://### HDFS PATH ### 1924 [1,2,4,8589934592,1,0] -val_413 hdfs://### HDFS PATH ### 2600 [1,2,4,8589934592,1,0] -val_413 hdfs://### HDFS PATH ### 610 [1,2,4,8589934592,1,0] -val_414 hdfs://### HDFS PATH ### 4686 [1,2,4,8589934592,1,0] -val_414 hdfs://### HDFS PATH ### 5696 [1,2,4,8589934592,1,0] -val_417 hdfs://### HDFS PATH ### 430 [1,2,4,8589934592,1,0] -val_417 hdfs://### HDFS PATH ### 4794 [1,2,4,8589934592,1,0] -val_417 hdfs://### HDFS PATH ### 730 [1,2,4,8589934592,1,0] -val_418 hdfs://### HDFS PATH ### 2204 [1,2,4,8589934592,1,0] -val_419 hdfs://### HDFS PATH ### 2758 [1,2,4,8589934592,1,0] -val_42 hdfs://### HDFS PATH ### 2030 [1,2,4,8589934592,1,0] -val_42 hdfs://### HDFS PATH ### 3298 [1,2,4,8589934592,1,0] -val_421 
hdfs://### HDFS PATH ### 5236 [1,2,4,8589934592,1,0] -val_424 hdfs://### HDFS PATH ### 4350 [1,2,4,8589934592,1,0] -val_424 hdfs://### HDFS PATH ### 4504 [1,2,4,8589934592,1,0] -val_427 hdfs://### HDFS PATH ### 1248 [1,2,4,8589934592,1,0] -val_429 hdfs://### HDFS PATH ### 256 [1,2,4,8589934592,1,0] -val_429 hdfs://### HDFS PATH ### 4842 [1,2,4,8589934592,1,0] -val_43 hdfs://### HDFS PATH ### 2330 [1,2,4,8589934592,1,0] -val_430 hdfs://### HDFS PATH ### 1532 [1,2,4,8589934592,1,0] -val_430 hdfs://### HDFS PATH ### 3320 [1,2,4,8589934592,1,0] -val_430 hdfs://### HDFS PATH ### 442 [1,2,4,8589934592,1,0] -val_431 hdfs://### HDFS PATH ### 1994 [1,2,4,8589934592,1,0] -val_431 hdfs://### HDFS PATH ### 4420 [1,2,4,8589934592,1,0] -val_431 hdfs://### HDFS PATH ### 4480 [1,2,4,8589934592,1,0] -val_432 hdfs://### HDFS PATH ### 3920 [1,2,4,8589934592,1,0] -val_435 hdfs://### HDFS PATH ### 2834 [1,2,4,8589934592,1,0] -val_436 hdfs://### HDFS PATH ### 2340 [1,2,4,8589934592,1,0] -val_437 hdfs://### HDFS PATH ### 1368 [1,2,4,8589934592,1,0] -val_438 hdfs://### HDFS PATH ### 1070 [1,2,4,8589934592,1,0] -val_438 hdfs://### HDFS PATH ### 3884 [1,2,4,8589934592,1,0] -val_438 hdfs://### HDFS PATH ### 4662 [1,2,4,8589934592,1,0] -val_439 hdfs://### HDFS PATH ### 4734 [1,2,4,8589934592,1,0] -val_439 hdfs://### HDFS PATH ### 826 [1,2,4,8589934592,1,0] -val_44 hdfs://### HDFS PATH ### 4068 [1,2,4,8589934592,1,0] -val_443 hdfs://### HDFS PATH ### 4866 [1,2,4,8589934592,1,0] -val_444 hdfs://### HDFS PATH ### 4818 [1,2,4,8589934592,1,0] -val_446 hdfs://### HDFS PATH ### 538 [1,2,4,8589934592,1,0] -val_448 hdfs://### HDFS PATH ### 5636 [1,2,4,8589934592,1,0] -val_449 hdfs://### HDFS PATH ### 3434 [1,2,4,8589934592,1,0] -val_452 hdfs://### HDFS PATH ### 3024 [1,2,4,8589934592,1,0] -val_453 hdfs://### HDFS PATH ### 3482 [1,2,4,8589934592,1,0] -val_454 hdfs://### HDFS PATH ### 2144 [1,2,4,8589934592,1,0] -val_454 hdfs://### HDFS PATH ### 4432 [1,2,4,8589934592,1,0] -val_454 hdfs://### HDFS PATH ### 5200 [1,2,4,8589934592,1,0] -val_455 hdfs://### HDFS PATH ### 976 [1,2,4,8589934592,1,0] -val_457 hdfs://### HDFS PATH ### 2446 [1,2,4,8589934592,1,0] -val_458 hdfs://### HDFS PATH ### 3356 [1,2,4,8589934592,1,0] -val_458 hdfs://### HDFS PATH ### 5442 [1,2,4,8589934592,1,0] -val_459 hdfs://### HDFS PATH ### 1450 [1,2,4,8589934592,1,0] -val_459 hdfs://### HDFS PATH ### 550 [1,2,4,8589934592,1,0] -val_460 hdfs://### HDFS PATH ### 5010 [1,2,4,8589934592,1,0] -val_462 hdfs://### HDFS PATH ### 5128 [1,2,4,8589934592,1,0] -val_462 hdfs://### HDFS PATH ### 5350 [1,2,4,8589934592,1,0] -val_463 hdfs://### HDFS PATH ### 1982 [1,2,4,8589934592,1,0] -val_463 hdfs://### HDFS PATH ### 3980 [1,2,4,8589934592,1,0] -val_466 hdfs://### HDFS PATH ### 1894 [1,2,4,8589934592,1,0] -val_466 hdfs://### HDFS PATH ### 4126 [1,2,4,8589934592,1,0] -val_466 hdfs://### HDFS PATH ### 658 [1,2,4,8589934592,1,0] -val_467 hdfs://### HDFS PATH ### 3908 [1,2,4,8589934592,1,0] -val_468 hdfs://### HDFS PATH ### 2120 [1,2,4,8589934592,1,0] -val_468 hdfs://### HDFS PATH ### 2376 [1,2,4,8589934592,1,0] -val_468 hdfs://### HDFS PATH ### 3526 [1,2,4,8589934592,1,0] -val_468 hdfs://### HDFS PATH ### 4950 [1,2,4,8589934592,1,0] -val_469 hdfs://### HDFS PATH ### 1380 [1,2,4,8589934592,1,0] -val_469 hdfs://### HDFS PATH ### 2364 [1,2,4,8589934592,1,0] -val_469 hdfs://### HDFS PATH ### 292 [1,2,4,8589934592,1,0] -val_469 hdfs://### HDFS PATH ### 3968 [1,2,4,8589934592,1,0] -val_469 hdfs://### HDFS PATH ### 5582 [1,2,4,8589934592,1,0] -val_47 hdfs://### HDFS PATH ### 1198 
[1,2,4,8589934592,1,0] -val_470 hdfs://### HDFS PATH ### 2540 [1,2,4,8589934592,1,0] -val_472 hdfs://### HDFS PATH ### 3238 [1,2,4,8589934592,1,0] -val_475 hdfs://### HDFS PATH ### 898 [1,2,4,8589934592,1,0] -val_477 hdfs://### HDFS PATH ### 5708 [1,2,4,8589934592,1,0] -val_478 hdfs://### HDFS PATH ### 4444 [1,2,4,8589934592,1,0] -val_478 hdfs://### HDFS PATH ### 4926 [1,2,4,8589934592,1,0] -val_479 hdfs://### HDFS PATH ### 4770 [1,2,4,8589934592,1,0] -val_480 hdfs://### HDFS PATH ### 3816 [1,2,4,8589934592,1,0] -val_480 hdfs://### HDFS PATH ### 4570 [1,2,4,8589934592,1,0] -val_480 hdfs://### HDFS PATH ### 5058 [1,2,4,8589934592,1,0] -val_481 hdfs://### HDFS PATH ### 2434 [1,2,4,8589934592,1,0] -val_482 hdfs://### HDFS PATH ### 586 [1,2,4,8589934592,1,0] -val_483 hdfs://### HDFS PATH ### 4174 [1,2,4,8589934592,1,0] -val_484 hdfs://### HDFS PATH ### 102 [1,2,4,8589934592,1,0] -val_485 hdfs://### HDFS PATH ### 3734 [1,2,4,8589934592,1,0] -val_487 hdfs://### HDFS PATH ### 3804 [1,2,4,8589934592,1,0] -val_489 hdfs://### HDFS PATH ### 1128 [1,2,4,8589934592,1,0] -val_489 hdfs://### HDFS PATH ### 1800 [1,2,4,8589934592,1,0] -val_489 hdfs://### HDFS PATH ### 3344 [1,2,4,8589934592,1,0] -val_489 hdfs://### HDFS PATH ### 742 [1,2,4,8589934592,1,0] -val_490 hdfs://### HDFS PATH ### 2640 [1,2,4,8589934592,1,0] -val_491 hdfs://### HDFS PATH ### 4710 [1,2,4,8589934592,1,0] -val_492 hdfs://### HDFS PATH ### 3410 [1,2,4,8589934592,1,0] -val_492 hdfs://### HDFS PATH ### 5362 [1,2,4,8589934592,1,0] -val_493 hdfs://### HDFS PATH ### 4998 [1,2,4,8589934592,1,0] -val_494 hdfs://### HDFS PATH ### 622 [1,2,4,8589934592,1,0] -val_495 hdfs://### HDFS PATH ### 316 [1,2,4,8589934592,1,0] -val_496 hdfs://### HDFS PATH ### 2076 [1,2,4,8589934592,1,0] -val_497 hdfs://### HDFS PATH ### 3068 [1,2,4,8589934592,1,0] -val_498 hdfs://### HDFS PATH ### 1332 [1,2,4,8589934592,1,0] -val_498 hdfs://### HDFS PATH ### 3262 [1,2,4,8589934592,1,0] -val_498 hdfs://### HDFS PATH ### 5418 [1,2,4,8589934592,1,0] -val_5 hdfs://### HDFS PATH ### 3060 [1,2,4,8589934592,1,0] -val_5 hdfs://### HDFS PATH ### 3864 [1,2,4,8589934592,1,0] -val_5 hdfs://### HDFS PATH ### 4540 [1,2,4,8589934592,1,0] -val_51 hdfs://### HDFS PATH ### 1462 [1,2,4,8589934592,1,0] -val_51 hdfs://### HDFS PATH ### 2308 [1,2,4,8589934592,1,0] -val_53 hdfs://### HDFS PATH ### 4186 [1,2,4,8589934592,1,0] -val_54 hdfs://### HDFS PATH ### 1440 [1,2,4,8589934592,1,0] -val_57 hdfs://### HDFS PATH ### 1024 [1,2,4,8589934592,1,0] -val_58 hdfs://### HDFS PATH ### 1906 [1,2,4,8589934592,1,0] -val_58 hdfs://### HDFS PATH ### 3128 [1,2,4,8589934592,1,0] -val_64 hdfs://### HDFS PATH ### 3516 [1,2,4,8589934592,1,0] -val_65 hdfs://### HDFS PATH ### 1592 [1,2,4,8589934592,1,0] -val_66 hdfs://### HDFS PATH ### 198 [1,2,4,8589934592,1,0] -val_67 hdfs://### HDFS PATH ### 1754 [1,2,4,8589934592,1,0] -val_67 hdfs://### HDFS PATH ### 5306 [1,2,4,8589934592,1,0] -val_69 hdfs://### HDFS PATH ### 3570 [1,2,4,8589934592,1,0] -val_70 hdfs://### HDFS PATH ### 3794 [1,2,4,8589934592,1,0] -val_70 hdfs://### HDFS PATH ### 4548 [1,2,4,8589934592,1,0] -val_70 hdfs://### HDFS PATH ### 4640 [1,2,4,8589934592,1,0] -val_72 hdfs://### HDFS PATH ### 1208 [1,2,4,8589934592,1,0] -val_72 hdfs://### HDFS PATH ### 2792 [1,2,4,8589934592,1,0] -val_74 hdfs://### HDFS PATH ### 3548 [1,2,4,8589934592,1,0] -val_76 hdfs://### HDFS PATH ### 3378 [1,2,4,8589934592,1,0] -val_76 hdfs://### HDFS PATH ### 3538 [1,2,4,8589934592,1,0] -val_77 hdfs://### HDFS PATH ### 2622 [1,2,4,8589934592,1,0] -val_78 hdfs://### HDFS PATH ### 
3368 [1,2,4,8589934592,1,0]
-val_8 hdfs://### HDFS PATH ### 1916 [1,2,4,8589934592,1,0]
-val_80 hdfs://### HDFS PATH ### 4058 [1,2,4,8589934592,1,0]
-val_82 hdfs://### HDFS PATH ### 396 [1,2,4,8589934592,1,0]
-val_83 hdfs://### HDFS PATH ### 1674 [1,2,4,8589934592,1,0]
-val_83 hdfs://### HDFS PATH ### 5070 [1,2,4,8589934592,1,0]
-val_84 hdfs://### HDFS PATH ### 1872 [1,2,4,8589934592,1,0]
-val_84 hdfs://### HDFS PATH ### 5606 [1,2,4,8589934592,1,0]
-val_85 hdfs://### HDFS PATH ### 2612 [1,2,4,8589934592,1,0]
-val_86 hdfs://### HDFS PATH ### 12 [1,2,4,8589934592,1,0]
-val_87 hdfs://### HDFS PATH ### 2652 [1,2,4,8589934592,1,0]
-val_9 hdfs://### HDFS PATH ### 5398 [1,2,4,8589934592,1,0]
-val_90 hdfs://### HDFS PATH ### 2802 [1,2,4,8589934592,1,0]
-val_90 hdfs://### HDFS PATH ### 4304 [1,2,4,8589934592,1,0]
-val_90 hdfs://### HDFS PATH ### 5744 [1,2,4,8589934592,1,0]
-val_92 hdfs://### HDFS PATH ### 1176 [1,2,4,8589934592,1,0]
-val_95 hdfs://### HDFS PATH ### 2400 [1,2,4,8589934592,1,0]
-val_95 hdfs://### HDFS PATH ### 3160 [1,2,4,8589934592,1,0]
-val_96 hdfs://### HDFS PATH ### 2216 [1,2,4,8589934592,1,0]
-val_97 hdfs://### HDFS PATH ### 5572 [1,2,4,8589934592,1,0]
-val_97 hdfs://### HDFS PATH ### 5802 [1,2,4,8589934592,1,0]
-val_98 hdfs://### HDFS PATH ### 2458 [1,2,4,8589934592,1,0]
-val_98 hdfs://### HDFS PATH ### 92 [1,2,4,8589934592,1,0]
-PREHOOK: query: EXPLAIN
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
- WHERE key = 0) a
- JOIN
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
- WHERE value = "val_0") b
- ON
- a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
- WHERE key = 0) a
- JOIN
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
- WHERE value = "val_0") b
- ON
- a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-2 depends on stages: Stage-1
- Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_src1_index__
- Statistics: Num rows: 500 Data size: 46311 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) = 0.0) and _bucketname is not null and _offset is not null) (type: boolean)
- Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string), _col1 (type: bigint)
- sort order: ++
- Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
- Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col2 (type: array)
- TableScan
- alias: default__src_src2_index__
- Statistics: Num rows: 500 Data size: 48311 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((value = 'val_0') and _bucketname is not null and _offset is not null) (type: boolean)
- Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string), _col1 (type: bigint)
- sort order: ++
- Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
- Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col2 (type: array)
- Reduce Operator Tree:
- Join Operator
- condition map:
- Inner Join 0 to 1
- keys:
- 0 _col0 (type: string), _col1 (type: bigint)
- 1 _col0 (type: string), _col1 (type: bigint)
- outputColumnNames: _col0, _col1, _col2, _col5
- Statistics: Num rows: 275 Data size: 25470 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: (not EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(_col2,_col5))) (type: boolean)
- Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: _col0 (type: string), _col1 (type: bigint)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
- Group By Operator
- aggregations: collect_set(_col1)
- keys: _col0 (type: string)
- mode: hash
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
- Stage: Stage-2
- Map Reduce
- Map Operator Tree:
- TableScan
- Reduce Output Operator
- key expressions: _col0 (type: string)
- sort order: +
- Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col1 (type: array)
- Reduce Operator Tree:
- Group By Operator
- aggregations: collect_set(VALUE._col0)
- keys: KEY._col0 (type: string)
- mode: mergepartial
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 69 Data size: 6390 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 69 Data size: 6390 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-#### A masked pattern was here ####
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
- WHERE key = 0) a
- JOIN
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
- WHERE value = "val_0") b
- ON
- a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src1_index__
-PREHOOK: Input: default@default__src_src2_index__
-#### A masked pattern was here ####
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
- WHERE key = 0) a
- JOIN
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
- WHERE value = "val_0") b
- ON
- a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src1_index__
-POSTHOOK: Input: default@default__src_src2_index__
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: DROP INDEX src1_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src1_index ON src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: DROP INDEX src2_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src2_index ON src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
diff --git ql/src/test/results/clientpositive/index_bitmap_auto.q.out ql/src/test/results/clientpositive/index_bitmap_auto.q.out
deleted file mode 100644
index 3d4c39476c..0000000000
--- ql/src/test/results/clientpositive/index_bitmap_auto.q.out
+++ /dev/null
@@ -1,1273 +0,0 @@
-PREHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
-
-PREHOOK: query: EXPLAIN
-CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
-
-PREHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src1_index__
-PREHOOK: query: CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src2_index__
-PREHOOK: query: ALTER INDEX src1_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src1_index__
-POSTHOOK: query: ALTER INDEX src1_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src1_index__
-POSTHOOK: Lineage: default__src_src1_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src1_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src1_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src1_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: ALTER INDEX src2_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src2_index__
-POSTHOOK: query: ALTER INDEX src2_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src2_index__
-POSTHOOK: Lineage: default__src_src2_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src2_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src2_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src2_index__.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM default__src_src1_index__
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src1_index__
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT * FROM default__src_src1_index__
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src1_index__
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-0 hdfs://### HDFS PATH ### 2088 [1,2,4,8589934592,1,0]
-0 hdfs://### HDFS PATH ### 2632 [1,2,4,8589934592,1,0]
-0 hdfs://### HDFS PATH ### 968 [1,2,4,8589934592,1,0]
-10 hdfs://### HDFS PATH ### 2846 [1,2,4,8589934592,1,0]
-100 hdfs://### HDFS PATH ### 2156 [1,2,4,8589934592,1,0]
-100 hdfs://### HDFS PATH ### 5374 [1,2,4,8589934592,1,0]
-103 hdfs://### HDFS PATH ### 1484 [1,2,4,8589934592,1,0]
-103 hdfs://### HDFS PATH ### 3614 [1,2,4,8589934592,1,0]
-104 hdfs://### HDFS PATH ### 4114 [1,2,4,8589934592,1,0]
-104 hdfs://### HDFS PATH ### 4628 [1,2,4,8589934592,1,0]
-105 hdfs://### HDFS PATH ### 4196 [1,2,4,8589934592,1,0]
-11 hdfs://### HDFS PATH ### 3170 [1,2,4,8589934592,1,0]
-111 hdfs://### HDFS PATH ### 1186 [1,2,4,8589934592,1,0]
-113 hdfs://### HDFS PATH ### 3638 [1,2,4,8589934592,1,0]
-113 hdfs://### HDFS PATH ###
920 [1,2,4,8589934592,1,0] -114 hdfs://### HDFS PATH ### 4280 [1,2,4,8589934592,1,0] -116 hdfs://### HDFS PATH ### 3746 [1,2,4,8589934592,1,0] -118 hdfs://### HDFS PATH ### 2686 [1,2,4,8589934592,1,0] -118 hdfs://### HDFS PATH ### 2780 [1,2,4,8589934592,1,0] -119 hdfs://### HDFS PATH ### 2064 [1,2,4,8589934592,1,0] -119 hdfs://### HDFS PATH ### 3332 [1,2,4,8589934592,1,0] -119 hdfs://### HDFS PATH ### 4674 [1,2,4,8589934592,1,0] -12 hdfs://### HDFS PATH ### 1720 [1,2,4,8589934592,1,0] -12 hdfs://### HDFS PATH ### 4362 [1,2,4,8589934592,1,0] -120 hdfs://### HDFS PATH ### 2284 [1,2,4,8589934592,1,0] -120 hdfs://### HDFS PATH ### 4830 [1,2,4,8589934592,1,0] -125 hdfs://### HDFS PATH ### 1344 [1,2,4,8589934592,1,0] -125 hdfs://### HDFS PATH ### 4468 [1,2,4,8589934592,1,0] -126 hdfs://### HDFS PATH ### 5732 [1,2,4,8589934592,1,0] -128 hdfs://### HDFS PATH ### 208 [1,2,4,8589934592,1,0] -128 hdfs://### HDFS PATH ### 3896 [1,2,4,8589934592,1,0] -128 hdfs://### HDFS PATH ### 988 [1,2,4,8589934592,1,0] -129 hdfs://### HDFS PATH ### 1094 [1,2,4,8589934592,1,0] -129 hdfs://### HDFS PATH ### 2040 [1,2,4,8589934592,1,0] -131 hdfs://### HDFS PATH ### 2296 [1,2,4,8589934592,1,0] -133 hdfs://### HDFS PATH ### 5164 [1,2,4,8589934592,1,0] -134 hdfs://### HDFS PATH ### 2698 [1,2,4,8589934592,1,0] -134 hdfs://### HDFS PATH ### 5294 [1,2,4,8589934592,1,0] -136 hdfs://### HDFS PATH ### 5080 [1,2,4,8589934592,1,0] -137 hdfs://### HDFS PATH ### 1650 [1,2,4,8589934592,1,0] -137 hdfs://### HDFS PATH ### 2552 [1,2,4,8589934592,1,0] -138 hdfs://### HDFS PATH ### 1472 [1,2,4,8589934592,1,0] -138 hdfs://### HDFS PATH ### 1848 [1,2,4,8589934592,1,0] -138 hdfs://### HDFS PATH ### 2734 [1,2,4,8589934592,1,0] -138 hdfs://### HDFS PATH ### 3470 [1,2,4,8589934592,1,0] -143 hdfs://### HDFS PATH ### 3226 [1,2,4,8589934592,1,0] -145 hdfs://### HDFS PATH ### 304 [1,2,4,8589934592,1,0] -146 hdfs://### HDFS PATH ### 232 [1,2,4,8589934592,1,0] -146 hdfs://### HDFS PATH ### 5430 [1,2,4,8589934592,1,0] -149 hdfs://### HDFS PATH ### 1058 [1,2,4,8589934592,1,0] -149 hdfs://### HDFS PATH ### 3422 [1,2,4,8589934592,1,0] -15 hdfs://### HDFS PATH ### 2770 [1,2,4,8589934592,1,0] -15 hdfs://### HDFS PATH ### 386 [1,2,4,8589934592,1,0] -150 hdfs://### HDFS PATH ### 150 [1,2,4,8589934592,1,0] -152 hdfs://### HDFS PATH ### 280 [1,2,4,8589934592,1,0] -152 hdfs://### HDFS PATH ### 5648 [1,2,4,8589934592,1,0] -153 hdfs://### HDFS PATH ### 502 [1,2,4,8589934592,1,0] -155 hdfs://### HDFS PATH ### 932 [1,2,4,8589934592,1,0] -156 hdfs://### HDFS PATH ### 2352 [1,2,4,8589934592,1,0] -157 hdfs://### HDFS PATH ### 1140 [1,2,4,8589934592,1,0] -158 hdfs://### HDFS PATH ### 2052 [1,2,4,8589934592,1,0] -160 hdfs://### HDFS PATH ### 3274 [1,2,4,8589934592,1,0] -162 hdfs://### HDFS PATH ### 754 [1,2,4,8589934592,1,0] -163 hdfs://### HDFS PATH ### 4650 [1,2,4,8589934592,1,0] -164 hdfs://### HDFS PATH ### 4408 [1,2,4,8589934592,1,0] -164 hdfs://### HDFS PATH ### 4492 [1,2,4,8589934592,1,0] -165 hdfs://### HDFS PATH ### 2236 [1,2,4,8589934592,1,0] -165 hdfs://### HDFS PATH ### 44 [1,2,4,8589934592,1,0] -166 hdfs://### HDFS PATH ### 418 [1,2,4,8589934592,1,0] -167 hdfs://### HDFS PATH ### 3686 [1,2,4,8589934592,1,0] -167 hdfs://### HDFS PATH ### 5502 [1,2,4,8589934592,1,0] -167 hdfs://### HDFS PATH ### 874 [1,2,4,8589934592,1,0] -168 hdfs://### HDFS PATH ### 3180 [1,2,4,8589934592,1,0] -169 hdfs://### HDFS PATH ### 1308 [1,2,4,8589934592,1,0] -169 hdfs://### HDFS PATH ### 2588 [1,2,4,8589934592,1,0] -169 hdfs://### HDFS PATH ### 4854 [1,2,4,8589934592,1,0] -169 
hdfs://### HDFS PATH ### 5754 [1,2,4,8589934592,1,0] -17 hdfs://### HDFS PATH ### 910 [1,2,4,8589934592,1,0] -170 hdfs://### HDFS PATH ### 1106 [1,2,4,8589934592,1,0] -172 hdfs://### HDFS PATH ### 2018 [1,2,4,8589934592,1,0] -172 hdfs://### HDFS PATH ### 5104 [1,2,4,8589934592,1,0] -174 hdfs://### HDFS PATH ### 598 [1,2,4,8589934592,1,0] -174 hdfs://### HDFS PATH ### 682 [1,2,4,8589934592,1,0] -175 hdfs://### HDFS PATH ### 4150 [1,2,4,8589934592,1,0] -175 hdfs://### HDFS PATH ### 5176 [1,2,4,8589934592,1,0] -176 hdfs://### HDFS PATH ### 1428 [1,2,4,8589934592,1,0] -176 hdfs://### HDFS PATH ### 1556 [1,2,4,8589934592,1,0] -177 hdfs://### HDFS PATH ### 3036 [1,2,4,8589934592,1,0] -178 hdfs://### HDFS PATH ### 4938 [1,2,4,8589934592,1,0] -179 hdfs://### HDFS PATH ### 2006 [1,2,4,8589934592,1,0] -179 hdfs://### HDFS PATH ### 2674 [1,2,4,8589934592,1,0] -18 hdfs://### HDFS PATH ### 5340 [1,2,4,8589934592,1,0] -18 hdfs://### HDFS PATH ### 5514 [1,2,4,8589934592,1,0] -180 hdfs://### HDFS PATH ### 1696 [1,2,4,8589934592,1,0] -181 hdfs://### HDFS PATH ### 1742 [1,2,4,8589934592,1,0] -183 hdfs://### HDFS PATH ### 5536 [1,2,4,8589934592,1,0] -186 hdfs://### HDFS PATH ### 5466 [1,2,4,8589934592,1,0] -187 hdfs://### HDFS PATH ### 1416 [1,2,4,8589934592,1,0] -187 hdfs://### HDFS PATH ### 2492 [1,2,4,8589934592,1,0] -187 hdfs://### HDFS PATH ### 4516 [1,2,4,8589934592,1,0] -189 hdfs://### HDFS PATH ### 5188 [1,2,4,8589934592,1,0] -19 hdfs://### HDFS PATH ### 2824 [1,2,4,8589934592,1,0] -190 hdfs://### HDFS PATH ### 4244 [1,2,4,8589934592,1,0] -191 hdfs://### HDFS PATH ### 2192 [1,2,4,8589934592,1,0] -191 hdfs://### HDFS PATH ### 3852 [1,2,4,8589934592,1,0] -192 hdfs://### HDFS PATH ### 1392 [1,2,4,8589934592,1,0] -193 hdfs://### HDFS PATH ### 126 [1,2,4,8589934592,1,0] -193 hdfs://### HDFS PATH ### 4078 [1,2,4,8589934592,1,0] -193 hdfs://### HDFS PATH ### 514 [1,2,4,8589934592,1,0] -194 hdfs://### HDFS PATH ### 5684 [1,2,4,8589934592,1,0] -195 hdfs://### HDFS PATH ### 3286 [1,2,4,8589934592,1,0] -195 hdfs://### HDFS PATH ### 886 [1,2,4,8589934592,1,0] -196 hdfs://### HDFS PATH ### 2410 [1,2,4,8589934592,1,0] -197 hdfs://### HDFS PATH ### 2108 [1,2,4,8589934592,1,0] -197 hdfs://### HDFS PATH ### 2480 [1,2,4,8589934592,1,0] -199 hdfs://### HDFS PATH ### 2180 [1,2,4,8589934592,1,0] -199 hdfs://### HDFS PATH ### 4806 [1,2,4,8589934592,1,0] -199 hdfs://### HDFS PATH ### 646 [1,2,4,8589934592,1,0] -2 hdfs://### HDFS PATH ### 4004 [1,2,4,8589934592,1,0] -20 hdfs://### HDFS PATH ### 1118 [1,2,4,8589934592,1,0] -200 hdfs://### HDFS PATH ### 4698 [1,2,4,8589934592,1,0] -200 hdfs://### HDFS PATH ### 5790 [1,2,4,8589934592,1,0] -201 hdfs://### HDFS PATH ### 4384 [1,2,4,8589934592,1,0] -202 hdfs://### HDFS PATH ### 3932 [1,2,4,8589934592,1,0] -203 hdfs://### HDFS PATH ### 4314 [1,2,4,8589934592,1,0] -203 hdfs://### HDFS PATH ### 944 [1,2,4,8589934592,1,0] -205 hdfs://### HDFS PATH ### 1046 [1,2,4,8589934592,1,0] -205 hdfs://### HDFS PATH ### 2272 [1,2,4,8589934592,1,0] -207 hdfs://### HDFS PATH ### 5022 [1,2,4,8589934592,1,0] -207 hdfs://### HDFS PATH ### 634 [1,2,4,8589934592,1,0] -208 hdfs://### HDFS PATH ### 1272 [1,2,4,8589934592,1,0] -208 hdfs://### HDFS PATH ### 1948 [1,2,4,8589934592,1,0] -208 hdfs://### HDFS PATH ### 670 [1,2,4,8589934592,1,0] -209 hdfs://### HDFS PATH ### 3504 [1,2,4,8589934592,1,0] -209 hdfs://### HDFS PATH ### 374 [1,2,4,8589934592,1,0] -213 hdfs://### HDFS PATH ### 1508 [1,2,4,8589934592,1,0] -213 hdfs://### HDFS PATH ### 220 [1,2,4,8589934592,1,0] -214 hdfs://### HDFS PATH ### 5116 
[1,2,4,8589934592,1,0] -216 hdfs://### HDFS PATH ### 1520 [1,2,4,8589934592,1,0] -216 hdfs://### HDFS PATH ### 3650 [1,2,4,8589934592,1,0] -217 hdfs://### HDFS PATH ### 1860 [1,2,4,8589934592,1,0] -217 hdfs://### HDFS PATH ### 4396 [1,2,4,8589934592,1,0] -218 hdfs://### HDFS PATH ### 3446 [1,2,4,8589934592,1,0] -219 hdfs://### HDFS PATH ### 3710 [1,2,4,8589934592,1,0] -219 hdfs://### HDFS PATH ### 478 [1,2,4,8589934592,1,0] -221 hdfs://### HDFS PATH ### 1164 [1,2,4,8589934592,1,0] -221 hdfs://### HDFS PATH ### 1580 [1,2,4,8589934592,1,0] -222 hdfs://### HDFS PATH ### 5720 [1,2,4,8589934592,1,0] -223 hdfs://### HDFS PATH ### 3398 [1,2,4,8589934592,1,0] -223 hdfs://### HDFS PATH ### 3758 [1,2,4,8589934592,1,0] -224 hdfs://### HDFS PATH ### 174 [1,2,4,8589934592,1,0] -224 hdfs://### HDFS PATH ### 2892 [1,2,4,8589934592,1,0] -226 hdfs://### HDFS PATH ### 3048 [1,2,4,8589934592,1,0] -228 hdfs://### HDFS PATH ### 3458 [1,2,4,8589934592,1,0] -229 hdfs://### HDFS PATH ### 3202 [1,2,4,8589934592,1,0] -229 hdfs://### HDFS PATH ### 3956 [1,2,4,8589934592,1,0] -230 hdfs://### HDFS PATH ### 1730 [1,2,4,8589934592,1,0] -230 hdfs://### HDFS PATH ### 1936 [1,2,4,8589934592,1,0] -230 hdfs://### HDFS PATH ### 2260 [1,2,4,8589934592,1,0] -230 hdfs://### HDFS PATH ### 3580 [1,2,4,8589934592,1,0] -230 hdfs://### HDFS PATH ### 4914 [1,2,4,8589934592,1,0] -233 hdfs://### HDFS PATH ### 3214 [1,2,4,8589934592,1,0] -233 hdfs://### HDFS PATH ### 5140 [1,2,4,8589934592,1,0] -235 hdfs://### HDFS PATH ### 4046 [1,2,4,8589934592,1,0] -237 hdfs://### HDFS PATH ### 4722 [1,2,4,8589934592,1,0] -237 hdfs://### HDFS PATH ### 574 [1,2,4,8589934592,1,0] -238 hdfs://### HDFS PATH ### 0 [1,2,4,8589934592,1,0] -238 hdfs://### HDFS PATH ### 2746 [1,2,4,8589934592,1,0] -239 hdfs://### HDFS PATH ### 1496 [1,2,4,8589934592,1,0] -239 hdfs://### HDFS PATH ### 3722 [1,2,4,8589934592,1,0] -24 hdfs://### HDFS PATH ### 1972 [1,2,4,8589934592,1,0] -24 hdfs://### HDFS PATH ### 4594 [1,2,4,8589934592,1,0] -241 hdfs://### HDFS PATH ### 1662 [1,2,4,8589934592,1,0] -242 hdfs://### HDFS PATH ### 2940 [1,2,4,8589934592,1,0] -242 hdfs://### HDFS PATH ### 3012 [1,2,4,8589934592,1,0] -244 hdfs://### HDFS PATH ### 3872 [1,2,4,8589934592,1,0] -247 hdfs://### HDFS PATH ### 718 [1,2,4,8589934592,1,0] -248 hdfs://### HDFS PATH ### 4758 [1,2,4,8589934592,1,0] -249 hdfs://### HDFS PATH ### 5034 [1,2,4,8589934592,1,0] -252 hdfs://### HDFS PATH ### 454 [1,2,4,8589934592,1,0] -255 hdfs://### HDFS PATH ### 4616 [1,2,4,8589934592,1,0] -255 hdfs://### HDFS PATH ### 68 [1,2,4,8589934592,1,0] -256 hdfs://### HDFS PATH ### 3770 [1,2,4,8589934592,1,0] -256 hdfs://### HDFS PATH ### 5272 [1,2,4,8589934592,1,0] -257 hdfs://### HDFS PATH ### 4208 [1,2,4,8589934592,1,0] -258 hdfs://### HDFS PATH ### 4292 [1,2,4,8589934592,1,0] -26 hdfs://### HDFS PATH ### 2226 [1,2,4,8589934592,1,0] -26 hdfs://### HDFS PATH ### 5284 [1,2,4,8589934592,1,0] -260 hdfs://### HDFS PATH ### 1764 [1,2,4,8589934592,1,0] -262 hdfs://### HDFS PATH ### 4326 [1,2,4,8589934592,1,0] -263 hdfs://### HDFS PATH ### 3782 [1,2,4,8589934592,1,0] -265 hdfs://### HDFS PATH ### 114 [1,2,4,8589934592,1,0] -265 hdfs://### HDFS PATH ### 5046 [1,2,4,8589934592,1,0] -266 hdfs://### HDFS PATH ### 814 [1,2,4,8589934592,1,0] -27 hdfs://### HDFS PATH ### 34 [1,2,4,8589934592,1,0] -272 hdfs://### HDFS PATH ### 1836 [1,2,4,8589934592,1,0] -272 hdfs://### HDFS PATH ### 2976 [1,2,4,8589934592,1,0] -273 hdfs://### HDFS PATH ### 162 [1,2,4,8589934592,1,0] -273 hdfs://### HDFS PATH ### 2868 [1,2,4,8589934592,1,0] -273 
hdfs://### HDFS PATH ### 5524 [1,2,4,8589934592,1,0] -274 hdfs://### HDFS PATH ### 3698 [1,2,4,8589934592,1,0] -275 hdfs://### HDFS PATH ### 1638 [1,2,4,8589934592,1,0] -277 hdfs://### HDFS PATH ### 1260 [1,2,4,8589934592,1,0] -277 hdfs://### HDFS PATH ### 2856 [1,2,4,8589934592,1,0] -277 hdfs://### HDFS PATH ### 362 [1,2,4,8589934592,1,0] -277 hdfs://### HDFS PATH ### 4902 [1,2,4,8589934592,1,0] -278 hdfs://### HDFS PATH ### 1544 [1,2,4,8589934592,1,0] -278 hdfs://### HDFS PATH ### 80 [1,2,4,8589934592,1,0] -28 hdfs://### HDFS PATH ### 5616 [1,2,4,8589934592,1,0] -280 hdfs://### HDFS PATH ### 1226 [1,2,4,8589934592,1,0] -280 hdfs://### HDFS PATH ### 3992 [1,2,4,8589934592,1,0] -281 hdfs://### HDFS PATH ### 350 [1,2,4,8589934592,1,0] -281 hdfs://### HDFS PATH ### 5548 [1,2,4,8589934592,1,0] -282 hdfs://### HDFS PATH ### 2468 [1,2,4,8589934592,1,0] -282 hdfs://### HDFS PATH ### 2722 [1,2,4,8589934592,1,0] -283 hdfs://### HDFS PATH ### 4022 [1,2,4,8589934592,1,0] -284 hdfs://### HDFS PATH ### 1708 [1,2,4,8589934592,1,0] -285 hdfs://### HDFS PATH ### 5478 [1,2,4,8589934592,1,0] -286 hdfs://### HDFS PATH ### 1404 [1,2,4,8589934592,1,0] -287 hdfs://### HDFS PATH ### 490 [1,2,4,8589934592,1,0] -288 hdfs://### HDFS PATH ### 2422 [1,2,4,8589934592,1,0] -288 hdfs://### HDFS PATH ### 3840 [1,2,4,8589934592,1,0] -289 hdfs://### HDFS PATH ### 1568 [1,2,4,8589934592,1,0] -291 hdfs://### HDFS PATH ### 4582 [1,2,4,8589934592,1,0] -292 hdfs://### HDFS PATH ### 466 [1,2,4,8589934592,1,0] -296 hdfs://### HDFS PATH ### 3626 [1,2,4,8589934592,1,0] -298 hdfs://### HDFS PATH ### 2168 [1,2,4,8589934592,1,0] -298 hdfs://### HDFS PATH ### 4456 [1,2,4,8589934592,1,0] -298 hdfs://### HDFS PATH ### 5386 [1,2,4,8589934592,1,0] -30 hdfs://### HDFS PATH ### 3494 [1,2,4,8589934592,1,0] -302 hdfs://### HDFS PATH ### 1034 [1,2,4,8589934592,1,0] -305 hdfs://### HDFS PATH ### 4782 [1,2,4,8589934592,1,0] -306 hdfs://### HDFS PATH ### 2880 [1,2,4,8589934592,1,0] -307 hdfs://### HDFS PATH ### 2812 [1,2,4,8589934592,1,0] -307 hdfs://### HDFS PATH ### 5672 [1,2,4,8589934592,1,0] -308 hdfs://### HDFS PATH ### 2388 [1,2,4,8589934592,1,0] -309 hdfs://### HDFS PATH ### 2904 [1,2,4,8589934592,1,0] -309 hdfs://### HDFS PATH ### 790 [1,2,4,8589934592,1,0] -310 hdfs://### HDFS PATH ### 4962 [1,2,4,8589934592,1,0] -311 hdfs://### HDFS PATH ### 1000 [1,2,4,8589934592,1,0] -311 hdfs://### HDFS PATH ### 1626 [1,2,4,8589934592,1,0] -311 hdfs://### HDFS PATH ### 22 [1,2,4,8589934592,1,0] -315 hdfs://### HDFS PATH ### 5594 [1,2,4,8589934592,1,0] -316 hdfs://### HDFS PATH ### 1012 [1,2,4,8589934592,1,0] -316 hdfs://### HDFS PATH ### 2576 [1,2,4,8589934592,1,0] -316 hdfs://### HDFS PATH ### 3944 [1,2,4,8589934592,1,0] -317 hdfs://### HDFS PATH ### 3104 [1,2,4,8589934592,1,0] -317 hdfs://### HDFS PATH ### 4974 [1,2,4,8589934592,1,0] -318 hdfs://### HDFS PATH ### 1602 [1,2,4,8589934592,1,0] -318 hdfs://### HDFS PATH ### 2504 [1,2,4,8589934592,1,0] -318 hdfs://### HDFS PATH ### 2516 [1,2,4,8589934592,1,0] -321 hdfs://### HDFS PATH ### 3308 [1,2,4,8589934592,1,0] -321 hdfs://### HDFS PATH ### 4090 [1,2,4,8589934592,1,0] -322 hdfs://### HDFS PATH ### 2096 [1,2,4,8589934592,1,0] -322 hdfs://### HDFS PATH ### 3250 [1,2,4,8589934592,1,0] -323 hdfs://### HDFS PATH ### 4878 [1,2,4,8589934592,1,0] -325 hdfs://### HDFS PATH ### 4890 [1,2,4,8589934592,1,0] -325 hdfs://### HDFS PATH ### 862 [1,2,4,8589934592,1,0] -327 hdfs://### HDFS PATH ### 2248 [1,2,4,8589934592,1,0] -327 hdfs://### HDFS PATH ### 2928 [1,2,4,8589934592,1,0] -327 hdfs://### HDFS PATH ### 338 
[1,2,4,8589934592,1,0] -33 hdfs://### HDFS PATH ### 3592 [1,2,4,8589934592,1,0] -331 hdfs://### HDFS PATH ### 2988 [1,2,4,8589934592,1,0] -331 hdfs://### HDFS PATH ### 4034 [1,2,4,8589934592,1,0] -332 hdfs://### HDFS PATH ### 1614 [1,2,4,8589934592,1,0] -333 hdfs://### HDFS PATH ### 1684 [1,2,4,8589934592,1,0] -333 hdfs://### HDFS PATH ### 4986 [1,2,4,8589934592,1,0] -335 hdfs://### HDFS PATH ### 4102 [1,2,4,8589934592,1,0] -336 hdfs://### HDFS PATH ### 3148 [1,2,4,8589934592,1,0] -338 hdfs://### HDFS PATH ### 526 [1,2,4,8589934592,1,0] -339 hdfs://### HDFS PATH ### 956 [1,2,4,8589934592,1,0] -34 hdfs://### HDFS PATH ### 3192 [1,2,4,8589934592,1,0] -341 hdfs://### HDFS PATH ### 5406 [1,2,4,8589934592,1,0] -342 hdfs://### HDFS PATH ### 3558 [1,2,4,8589934592,1,0] -342 hdfs://### HDFS PATH ### 838 [1,2,4,8589934592,1,0] -344 hdfs://### HDFS PATH ### 3674 [1,2,4,8589934592,1,0] -344 hdfs://### HDFS PATH ### 5560 [1,2,4,8589934592,1,0] -345 hdfs://### HDFS PATH ### 1082 [1,2,4,8589934592,1,0] -348 hdfs://### HDFS PATH ### 1882 [1,2,4,8589934592,1,0] -348 hdfs://### HDFS PATH ### 1960 [1,2,4,8589934592,1,0] -348 hdfs://### HDFS PATH ### 4338 [1,2,4,8589934592,1,0] -348 hdfs://### HDFS PATH ### 5490 [1,2,4,8589934592,1,0] -348 hdfs://### HDFS PATH ### 5660 [1,2,4,8589934592,1,0] -35 hdfs://### HDFS PATH ### 1238 [1,2,4,8589934592,1,0] -35 hdfs://### HDFS PATH ### 3138 [1,2,4,8589934592,1,0] -35 hdfs://### HDFS PATH ### 4012 [1,2,4,8589934592,1,0] -351 hdfs://### HDFS PATH ### 4604 [1,2,4,8589934592,1,0] -353 hdfs://### HDFS PATH ### 1812 [1,2,4,8589934592,1,0] -353 hdfs://### HDFS PATH ### 5092 [1,2,4,8589934592,1,0] -356 hdfs://### HDFS PATH ### 1284 [1,2,4,8589934592,1,0] -360 hdfs://### HDFS PATH ### 4746 [1,2,4,8589934592,1,0] -362 hdfs://### HDFS PATH ### 5454 [1,2,4,8589934592,1,0] -364 hdfs://### HDFS PATH ### 2662 [1,2,4,8589934592,1,0] -365 hdfs://### HDFS PATH ### 802 [1,2,4,8589934592,1,0] -366 hdfs://### HDFS PATH ### 4138 [1,2,4,8589934592,1,0] -367 hdfs://### HDFS PATH ### 3662 [1,2,4,8589934592,1,0] -367 hdfs://### HDFS PATH ### 850 [1,2,4,8589934592,1,0] -368 hdfs://### HDFS PATH ### 3602 [1,2,4,8589934592,1,0] -369 hdfs://### HDFS PATH ### 186 [1,2,4,8589934592,1,0] -369 hdfs://### HDFS PATH ### 2564 [1,2,4,8589934592,1,0] -369 hdfs://### HDFS PATH ### 2952 [1,2,4,8589934592,1,0] -37 hdfs://### HDFS PATH ### 328 [1,2,4,8589934592,1,0] -37 hdfs://### HDFS PATH ### 5626 [1,2,4,8589934592,1,0] -373 hdfs://### HDFS PATH ### 1824 [1,2,4,8589934592,1,0] -374 hdfs://### HDFS PATH ### 268 [1,2,4,8589934592,1,0] -375 hdfs://### HDFS PATH ### 5212 [1,2,4,8589934592,1,0] -377 hdfs://### HDFS PATH ### 766 [1,2,4,8589934592,1,0] -378 hdfs://### HDFS PATH ### 1152 [1,2,4,8589934592,1,0] -379 hdfs://### HDFS PATH ### 5328 [1,2,4,8589934592,1,0] -382 hdfs://### HDFS PATH ### 1320 [1,2,4,8589934592,1,0] -382 hdfs://### HDFS PATH ### 4528 [1,2,4,8589934592,1,0] -384 hdfs://### HDFS PATH ### 1788 [1,2,4,8589934592,1,0] -384 hdfs://### HDFS PATH ### 5260 [1,2,4,8589934592,1,0] -384 hdfs://### HDFS PATH ### 5316 [1,2,4,8589934592,1,0] -386 hdfs://### HDFS PATH ### 1356 [1,2,4,8589934592,1,0] -389 hdfs://### HDFS PATH ### 2916 [1,2,4,8589934592,1,0] -392 hdfs://### HDFS PATH ### 2964 [1,2,4,8589934592,1,0] -393 hdfs://### HDFS PATH ### 2132 [1,2,4,8589934592,1,0] -394 hdfs://### HDFS PATH ### 562 [1,2,4,8589934592,1,0] -395 hdfs://### HDFS PATH ### 2710 [1,2,4,8589934592,1,0] -395 hdfs://### HDFS PATH ### 3116 [1,2,4,8589934592,1,0] -396 hdfs://### HDFS PATH ### 3092 [1,2,4,8589934592,1,0] -396 
hdfs://### HDFS PATH ### 4372 [1,2,4,8589934592,1,0] -396 hdfs://### HDFS PATH ### 706 [1,2,4,8589934592,1,0] -397 hdfs://### HDFS PATH ### 4558 [1,2,4,8589934592,1,0] -397 hdfs://### HDFS PATH ### 778 [1,2,4,8589934592,1,0] -399 hdfs://### HDFS PATH ### 1296 [1,2,4,8589934592,1,0] -399 hdfs://### HDFS PATH ### 694 [1,2,4,8589934592,1,0] -4 hdfs://### HDFS PATH ### 1218 [1,2,4,8589934592,1,0] -400 hdfs://### HDFS PATH ### 5778 [1,2,4,8589934592,1,0] -401 hdfs://### HDFS PATH ### 138 [1,2,4,8589934592,1,0] -401 hdfs://### HDFS PATH ### 3000 [1,2,4,8589934592,1,0] -401 hdfs://### HDFS PATH ### 3828 [1,2,4,8589934592,1,0] -401 hdfs://### HDFS PATH ### 4268 [1,2,4,8589934592,1,0] -401 hdfs://### HDFS PATH ### 5224 [1,2,4,8589934592,1,0] -402 hdfs://### HDFS PATH ### 3080 [1,2,4,8589934592,1,0] -403 hdfs://### HDFS PATH ### 406 [1,2,4,8589934592,1,0] -403 hdfs://### HDFS PATH ### 4162 [1,2,4,8589934592,1,0] -403 hdfs://### HDFS PATH ### 5766 [1,2,4,8589934592,1,0] -404 hdfs://### HDFS PATH ### 1776 [1,2,4,8589934592,1,0] -404 hdfs://### HDFS PATH ### 2318 [1,2,4,8589934592,1,0] -406 hdfs://### HDFS PATH ### 244 [1,2,4,8589934592,1,0] -406 hdfs://### HDFS PATH ### 4220 [1,2,4,8589934592,1,0] -406 hdfs://### HDFS PATH ### 4256 [1,2,4,8589934592,1,0] -406 hdfs://### HDFS PATH ### 5152 [1,2,4,8589934592,1,0] -407 hdfs://### HDFS PATH ### 5248 [1,2,4,8589934592,1,0] -409 hdfs://### HDFS PATH ### 2528 [1,2,4,8589934592,1,0] -409 hdfs://### HDFS PATH ### 4232 [1,2,4,8589934592,1,0] -409 hdfs://### HDFS PATH ### 56 [1,2,4,8589934592,1,0] -41 hdfs://### HDFS PATH ### 3388 [1,2,4,8589934592,1,0] -411 hdfs://### HDFS PATH ### 1924 [1,2,4,8589934592,1,0] -413 hdfs://### HDFS PATH ### 2600 [1,2,4,8589934592,1,0] -413 hdfs://### HDFS PATH ### 610 [1,2,4,8589934592,1,0] -414 hdfs://### HDFS PATH ### 4686 [1,2,4,8589934592,1,0] -414 hdfs://### HDFS PATH ### 5696 [1,2,4,8589934592,1,0] -417 hdfs://### HDFS PATH ### 430 [1,2,4,8589934592,1,0] -417 hdfs://### HDFS PATH ### 4794 [1,2,4,8589934592,1,0] -417 hdfs://### HDFS PATH ### 730 [1,2,4,8589934592,1,0] -418 hdfs://### HDFS PATH ### 2204 [1,2,4,8589934592,1,0] -419 hdfs://### HDFS PATH ### 2758 [1,2,4,8589934592,1,0] -42 hdfs://### HDFS PATH ### 2030 [1,2,4,8589934592,1,0] -42 hdfs://### HDFS PATH ### 3298 [1,2,4,8589934592,1,0] -421 hdfs://### HDFS PATH ### 5236 [1,2,4,8589934592,1,0] -424 hdfs://### HDFS PATH ### 4350 [1,2,4,8589934592,1,0] -424 hdfs://### HDFS PATH ### 4504 [1,2,4,8589934592,1,0] -427 hdfs://### HDFS PATH ### 1248 [1,2,4,8589934592,1,0] -429 hdfs://### HDFS PATH ### 256 [1,2,4,8589934592,1,0] -429 hdfs://### HDFS PATH ### 4842 [1,2,4,8589934592,1,0] -43 hdfs://### HDFS PATH ### 2330 [1,2,4,8589934592,1,0] -430 hdfs://### HDFS PATH ### 1532 [1,2,4,8589934592,1,0] -430 hdfs://### HDFS PATH ### 3320 [1,2,4,8589934592,1,0] -430 hdfs://### HDFS PATH ### 442 [1,2,4,8589934592,1,0] -431 hdfs://### HDFS PATH ### 1994 [1,2,4,8589934592,1,0] -431 hdfs://### HDFS PATH ### 4420 [1,2,4,8589934592,1,0] -431 hdfs://### HDFS PATH ### 4480 [1,2,4,8589934592,1,0] -432 hdfs://### HDFS PATH ### 3920 [1,2,4,8589934592,1,0] -435 hdfs://### HDFS PATH ### 2834 [1,2,4,8589934592,1,0] -436 hdfs://### HDFS PATH ### 2340 [1,2,4,8589934592,1,0] -437 hdfs://### HDFS PATH ### 1368 [1,2,4,8589934592,1,0] -438 hdfs://### HDFS PATH ### 1070 [1,2,4,8589934592,1,0] -438 hdfs://### HDFS PATH ### 3884 [1,2,4,8589934592,1,0] -438 hdfs://### HDFS PATH ### 4662 [1,2,4,8589934592,1,0] -439 hdfs://### HDFS PATH ### 4734 [1,2,4,8589934592,1,0] -439 hdfs://### HDFS PATH ### 826 
[1,2,4,8589934592,1,0] -44 hdfs://### HDFS PATH ### 4068 [1,2,4,8589934592,1,0] -443 hdfs://### HDFS PATH ### 4866 [1,2,4,8589934592,1,0] -444 hdfs://### HDFS PATH ### 4818 [1,2,4,8589934592,1,0] -446 hdfs://### HDFS PATH ### 538 [1,2,4,8589934592,1,0] -448 hdfs://### HDFS PATH ### 5636 [1,2,4,8589934592,1,0] -449 hdfs://### HDFS PATH ### 3434 [1,2,4,8589934592,1,0] -452 hdfs://### HDFS PATH ### 3024 [1,2,4,8589934592,1,0] -453 hdfs://### HDFS PATH ### 3482 [1,2,4,8589934592,1,0] -454 hdfs://### HDFS PATH ### 2144 [1,2,4,8589934592,1,0] -454 hdfs://### HDFS PATH ### 4432 [1,2,4,8589934592,1,0] -454 hdfs://### HDFS PATH ### 5200 [1,2,4,8589934592,1,0] -455 hdfs://### HDFS PATH ### 976 [1,2,4,8589934592,1,0] -457 hdfs://### HDFS PATH ### 2446 [1,2,4,8589934592,1,0] -458 hdfs://### HDFS PATH ### 3356 [1,2,4,8589934592,1,0] -458 hdfs://### HDFS PATH ### 5442 [1,2,4,8589934592,1,0] -459 hdfs://### HDFS PATH ### 1450 [1,2,4,8589934592,1,0] -459 hdfs://### HDFS PATH ### 550 [1,2,4,8589934592,1,0] -460 hdfs://### HDFS PATH ### 5010 [1,2,4,8589934592,1,0] -462 hdfs://### HDFS PATH ### 5128 [1,2,4,8589934592,1,0] -462 hdfs://### HDFS PATH ### 5350 [1,2,4,8589934592,1,0] -463 hdfs://### HDFS PATH ### 1982 [1,2,4,8589934592,1,0] -463 hdfs://### HDFS PATH ### 3980 [1,2,4,8589934592,1,0] -466 hdfs://### HDFS PATH ### 1894 [1,2,4,8589934592,1,0] -466 hdfs://### HDFS PATH ### 4126 [1,2,4,8589934592,1,0] -466 hdfs://### HDFS PATH ### 658 [1,2,4,8589934592,1,0] -467 hdfs://### HDFS PATH ### 3908 [1,2,4,8589934592,1,0] -468 hdfs://### HDFS PATH ### 2120 [1,2,4,8589934592,1,0] -468 hdfs://### HDFS PATH ### 2376 [1,2,4,8589934592,1,0] -468 hdfs://### HDFS PATH ### 3526 [1,2,4,8589934592,1,0] -468 hdfs://### HDFS PATH ### 4950 [1,2,4,8589934592,1,0] -469 hdfs://### HDFS PATH ### 1380 [1,2,4,8589934592,1,0] -469 hdfs://### HDFS PATH ### 2364 [1,2,4,8589934592,1,0] -469 hdfs://### HDFS PATH ### 292 [1,2,4,8589934592,1,0] -469 hdfs://### HDFS PATH ### 3968 [1,2,4,8589934592,1,0] -469 hdfs://### HDFS PATH ### 5582 [1,2,4,8589934592,1,0] -47 hdfs://### HDFS PATH ### 1198 [1,2,4,8589934592,1,0] -470 hdfs://### HDFS PATH ### 2540 [1,2,4,8589934592,1,0] -472 hdfs://### HDFS PATH ### 3238 [1,2,4,8589934592,1,0] -475 hdfs://### HDFS PATH ### 898 [1,2,4,8589934592,1,0] -477 hdfs://### HDFS PATH ### 5708 [1,2,4,8589934592,1,0] -478 hdfs://### HDFS PATH ### 4444 [1,2,4,8589934592,1,0] -478 hdfs://### HDFS PATH ### 4926 [1,2,4,8589934592,1,0] -479 hdfs://### HDFS PATH ### 4770 [1,2,4,8589934592,1,0] -480 hdfs://### HDFS PATH ### 3816 [1,2,4,8589934592,1,0] -480 hdfs://### HDFS PATH ### 4570 [1,2,4,8589934592,1,0] -480 hdfs://### HDFS PATH ### 5058 [1,2,4,8589934592,1,0] -481 hdfs://### HDFS PATH ### 2434 [1,2,4,8589934592,1,0] -482 hdfs://### HDFS PATH ### 586 [1,2,4,8589934592,1,0] -483 hdfs://### HDFS PATH ### 4174 [1,2,4,8589934592,1,0] -484 hdfs://### HDFS PATH ### 102 [1,2,4,8589934592,1,0] -485 hdfs://### HDFS PATH ### 3734 [1,2,4,8589934592,1,0] -487 hdfs://### HDFS PATH ### 3804 [1,2,4,8589934592,1,0] -489 hdfs://### HDFS PATH ### 1128 [1,2,4,8589934592,1,0] -489 hdfs://### HDFS PATH ### 1800 [1,2,4,8589934592,1,0] -489 hdfs://### HDFS PATH ### 3344 [1,2,4,8589934592,1,0] -489 hdfs://### HDFS PATH ### 742 [1,2,4,8589934592,1,0] -490 hdfs://### HDFS PATH ### 2640 [1,2,4,8589934592,1,0] -491 hdfs://### HDFS PATH ### 4710 [1,2,4,8589934592,1,0] -492 hdfs://### HDFS PATH ### 3410 [1,2,4,8589934592,1,0] -492 hdfs://### HDFS PATH ### 5362 [1,2,4,8589934592,1,0] -493 hdfs://### HDFS PATH ### 4998 [1,2,4,8589934592,1,0] -494 
hdfs://### HDFS PATH ### 622 [1,2,4,8589934592,1,0] -495 hdfs://### HDFS PATH ### 316 [1,2,4,8589934592,1,0] -496 hdfs://### HDFS PATH ### 2076 [1,2,4,8589934592,1,0] -497 hdfs://### HDFS PATH ### 3068 [1,2,4,8589934592,1,0] -498 hdfs://### HDFS PATH ### 1332 [1,2,4,8589934592,1,0] -498 hdfs://### HDFS PATH ### 3262 [1,2,4,8589934592,1,0] -498 hdfs://### HDFS PATH ### 5418 [1,2,4,8589934592,1,0] -5 hdfs://### HDFS PATH ### 3060 [1,2,4,8589934592,1,0] -5 hdfs://### HDFS PATH ### 3864 [1,2,4,8589934592,1,0] -5 hdfs://### HDFS PATH ### 4540 [1,2,4,8589934592,1,0] -51 hdfs://### HDFS PATH ### 1462 [1,2,4,8589934592,1,0] -51 hdfs://### HDFS PATH ### 2308 [1,2,4,8589934592,1,0] -53 hdfs://### HDFS PATH ### 4186 [1,2,4,8589934592,1,0] -54 hdfs://### HDFS PATH ### 1440 [1,2,4,8589934592,1,0] -57 hdfs://### HDFS PATH ### 1024 [1,2,4,8589934592,1,0] -58 hdfs://### HDFS PATH ### 1906 [1,2,4,8589934592,1,0] -58 hdfs://### HDFS PATH ### 3128 [1,2,4,8589934592,1,0] -64 hdfs://### HDFS PATH ### 3516 [1,2,4,8589934592,1,0] -65 hdfs://### HDFS PATH ### 1592 [1,2,4,8589934592,1,0] -66 hdfs://### HDFS PATH ### 198 [1,2,4,8589934592,1,0] -67 hdfs://### HDFS PATH ### 1754 [1,2,4,8589934592,1,0] -67 hdfs://### HDFS PATH ### 5306 [1,2,4,8589934592,1,0] -69 hdfs://### HDFS PATH ### 3570 [1,2,4,8589934592,1,0] -70 hdfs://### HDFS PATH ### 3794 [1,2,4,8589934592,1,0] -70 hdfs://### HDFS PATH ### 4548 [1,2,4,8589934592,1,0] -70 hdfs://### HDFS PATH ### 4640 [1,2,4,8589934592,1,0] -72 hdfs://### HDFS PATH ### 1208 [1,2,4,8589934592,1,0] -72 hdfs://### HDFS PATH ### 2792 [1,2,4,8589934592,1,0] -74 hdfs://### HDFS PATH ### 3548 [1,2,4,8589934592,1,0] -76 hdfs://### HDFS PATH ### 3378 [1,2,4,8589934592,1,0] -76 hdfs://### HDFS PATH ### 3538 [1,2,4,8589934592,1,0] -77 hdfs://### HDFS PATH ### 2622 [1,2,4,8589934592,1,0] -78 hdfs://### HDFS PATH ### 3368 [1,2,4,8589934592,1,0] -8 hdfs://### HDFS PATH ### 1916 [1,2,4,8589934592,1,0] -80 hdfs://### HDFS PATH ### 4058 [1,2,4,8589934592,1,0] -82 hdfs://### HDFS PATH ### 396 [1,2,4,8589934592,1,0] -83 hdfs://### HDFS PATH ### 1674 [1,2,4,8589934592,1,0] -83 hdfs://### HDFS PATH ### 5070 [1,2,4,8589934592,1,0] -84 hdfs://### HDFS PATH ### 1872 [1,2,4,8589934592,1,0] -84 hdfs://### HDFS PATH ### 5606 [1,2,4,8589934592,1,0] -85 hdfs://### HDFS PATH ### 2612 [1,2,4,8589934592,1,0] -86 hdfs://### HDFS PATH ### 12 [1,2,4,8589934592,1,0] -87 hdfs://### HDFS PATH ### 2652 [1,2,4,8589934592,1,0] -9 hdfs://### HDFS PATH ### 5398 [1,2,4,8589934592,1,0] -90 hdfs://### HDFS PATH ### 2802 [1,2,4,8589934592,1,0] -90 hdfs://### HDFS PATH ### 4304 [1,2,4,8589934592,1,0] -90 hdfs://### HDFS PATH ### 5744 [1,2,4,8589934592,1,0] -92 hdfs://### HDFS PATH ### 1176 [1,2,4,8589934592,1,0] -95 hdfs://### HDFS PATH ### 2400 [1,2,4,8589934592,1,0] -95 hdfs://### HDFS PATH ### 3160 [1,2,4,8589934592,1,0] -96 hdfs://### HDFS PATH ### 2216 [1,2,4,8589934592,1,0] -97 hdfs://### HDFS PATH ### 5572 [1,2,4,8589934592,1,0] -97 hdfs://### HDFS PATH ### 5802 [1,2,4,8589934592,1,0] -98 hdfs://### HDFS PATH ### 2458 [1,2,4,8589934592,1,0] -98 hdfs://### HDFS PATH ### 92 [1,2,4,8589934592,1,0] -PREHOOK: query: SELECT * FROM default__src_src2_index__ -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src2_index__ -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT * FROM default__src_src2_index__ -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src2_index__ -POSTHOOK: Output: hdfs://### HDFS PATH ### -val_0 hdfs://### HDFS PATH ### 2088 [1,2,4,8589934592,1,0] -val_0 hdfs://### 
HDFS PATH ### 2632 [1,2,4,8589934592,1,0] -val_0 hdfs://### HDFS PATH ### 968 [1,2,4,8589934592,1,0] -val_10 hdfs://### HDFS PATH ### 2846 [1,2,4,8589934592,1,0] -val_100 hdfs://### HDFS PATH ### 2156 [1,2,4,8589934592,1,0] -val_100 hdfs://### HDFS PATH ### 5374 [1,2,4,8589934592,1,0] -val_103 hdfs://### HDFS PATH ### 1484 [1,2,4,8589934592,1,0] -val_103 hdfs://### HDFS PATH ### 3614 [1,2,4,8589934592,1,0] -val_104 hdfs://### HDFS PATH ### 4114 [1,2,4,8589934592,1,0] -val_104 hdfs://### HDFS PATH ### 4628 [1,2,4,8589934592,1,0] -val_105 hdfs://### HDFS PATH ### 4196 [1,2,4,8589934592,1,0] -val_11 hdfs://### HDFS PATH ### 3170 [1,2,4,8589934592,1,0] -val_111 hdfs://### HDFS PATH ### 1186 [1,2,4,8589934592,1,0] -val_113 hdfs://### HDFS PATH ### 3638 [1,2,4,8589934592,1,0] -val_113 hdfs://### HDFS PATH ### 920 [1,2,4,8589934592,1,0] -val_114 hdfs://### HDFS PATH ### 4280 [1,2,4,8589934592,1,0] -val_116 hdfs://### HDFS PATH ### 3746 [1,2,4,8589934592,1,0] -val_118 hdfs://### HDFS PATH ### 2686 [1,2,4,8589934592,1,0] -val_118 hdfs://### HDFS PATH ### 2780 [1,2,4,8589934592,1,0] -val_119 hdfs://### HDFS PATH ### 2064 [1,2,4,8589934592,1,0] -val_119 hdfs://### HDFS PATH ### 3332 [1,2,4,8589934592,1,0] -val_119 hdfs://### HDFS PATH ### 4674 [1,2,4,8589934592,1,0] -val_12 hdfs://### HDFS PATH ### 1720 [1,2,4,8589934592,1,0] -val_12 hdfs://### HDFS PATH ### 4362 [1,2,4,8589934592,1,0] -val_120 hdfs://### HDFS PATH ### 2284 [1,2,4,8589934592,1,0] -val_120 hdfs://### HDFS PATH ### 4830 [1,2,4,8589934592,1,0] -val_125 hdfs://### HDFS PATH ### 1344 [1,2,4,8589934592,1,0] -val_125 hdfs://### HDFS PATH ### 4468 [1,2,4,8589934592,1,0] -val_126 hdfs://### HDFS PATH ### 5732 [1,2,4,8589934592,1,0] -val_128 hdfs://### HDFS PATH ### 208 [1,2,4,8589934592,1,0] -val_128 hdfs://### HDFS PATH ### 3896 [1,2,4,8589934592,1,0] -val_128 hdfs://### HDFS PATH ### 988 [1,2,4,8589934592,1,0] -val_129 hdfs://### HDFS PATH ### 1094 [1,2,4,8589934592,1,0] -val_129 hdfs://### HDFS PATH ### 2040 [1,2,4,8589934592,1,0] -val_131 hdfs://### HDFS PATH ### 2296 [1,2,4,8589934592,1,0] -val_133 hdfs://### HDFS PATH ### 5164 [1,2,4,8589934592,1,0] -val_134 hdfs://### HDFS PATH ### 2698 [1,2,4,8589934592,1,0] -val_134 hdfs://### HDFS PATH ### 5294 [1,2,4,8589934592,1,0] -val_136 hdfs://### HDFS PATH ### 5080 [1,2,4,8589934592,1,0] -val_137 hdfs://### HDFS PATH ### 1650 [1,2,4,8589934592,1,0] -val_137 hdfs://### HDFS PATH ### 2552 [1,2,4,8589934592,1,0] -val_138 hdfs://### HDFS PATH ### 1472 [1,2,4,8589934592,1,0] -val_138 hdfs://### HDFS PATH ### 1848 [1,2,4,8589934592,1,0] -val_138 hdfs://### HDFS PATH ### 2734 [1,2,4,8589934592,1,0] -val_138 hdfs://### HDFS PATH ### 3470 [1,2,4,8589934592,1,0] -val_143 hdfs://### HDFS PATH ### 3226 [1,2,4,8589934592,1,0] -val_145 hdfs://### HDFS PATH ### 304 [1,2,4,8589934592,1,0] -val_146 hdfs://### HDFS PATH ### 232 [1,2,4,8589934592,1,0] -val_146 hdfs://### HDFS PATH ### 5430 [1,2,4,8589934592,1,0] -val_149 hdfs://### HDFS PATH ### 1058 [1,2,4,8589934592,1,0] -val_149 hdfs://### HDFS PATH ### 3422 [1,2,4,8589934592,1,0] -val_15 hdfs://### HDFS PATH ### 2770 [1,2,4,8589934592,1,0] -val_15 hdfs://### HDFS PATH ### 386 [1,2,4,8589934592,1,0] -val_150 hdfs://### HDFS PATH ### 150 [1,2,4,8589934592,1,0] -val_152 hdfs://### HDFS PATH ### 280 [1,2,4,8589934592,1,0] -val_152 hdfs://### HDFS PATH ### 5648 [1,2,4,8589934592,1,0] -val_153 hdfs://### HDFS PATH ### 502 [1,2,4,8589934592,1,0] -val_155 hdfs://### HDFS PATH ### 932 [1,2,4,8589934592,1,0] -val_156 hdfs://### HDFS PATH ### 2352 
[1,2,4,8589934592,1,0] -val_157 hdfs://### HDFS PATH ### 1140 [1,2,4,8589934592,1,0] -val_158 hdfs://### HDFS PATH ### 2052 [1,2,4,8589934592,1,0] -val_160 hdfs://### HDFS PATH ### 3274 [1,2,4,8589934592,1,0] -val_162 hdfs://### HDFS PATH ### 754 [1,2,4,8589934592,1,0] -val_163 hdfs://### HDFS PATH ### 4650 [1,2,4,8589934592,1,0] -val_164 hdfs://### HDFS PATH ### 4408 [1,2,4,8589934592,1,0] -val_164 hdfs://### HDFS PATH ### 4492 [1,2,4,8589934592,1,0] -val_165 hdfs://### HDFS PATH ### 2236 [1,2,4,8589934592,1,0] -val_165 hdfs://### HDFS PATH ### 44 [1,2,4,8589934592,1,0] -val_166 hdfs://### HDFS PATH ### 418 [1,2,4,8589934592,1,0] -val_167 hdfs://### HDFS PATH ### 3686 [1,2,4,8589934592,1,0] -val_167 hdfs://### HDFS PATH ### 5502 [1,2,4,8589934592,1,0] -val_167 hdfs://### HDFS PATH ### 874 [1,2,4,8589934592,1,0] -val_168 hdfs://### HDFS PATH ### 3180 [1,2,4,8589934592,1,0] -val_169 hdfs://### HDFS PATH ### 1308 [1,2,4,8589934592,1,0] -val_169 hdfs://### HDFS PATH ### 2588 [1,2,4,8589934592,1,0] -val_169 hdfs://### HDFS PATH ### 4854 [1,2,4,8589934592,1,0] -val_169 hdfs://### HDFS PATH ### 5754 [1,2,4,8589934592,1,0] -val_17 hdfs://### HDFS PATH ### 910 [1,2,4,8589934592,1,0] -val_170 hdfs://### HDFS PATH ### 1106 [1,2,4,8589934592,1,0] -val_172 hdfs://### HDFS PATH ### 2018 [1,2,4,8589934592,1,0] -val_172 hdfs://### HDFS PATH ### 5104 [1,2,4,8589934592,1,0] -val_174 hdfs://### HDFS PATH ### 598 [1,2,4,8589934592,1,0] -val_174 hdfs://### HDFS PATH ### 682 [1,2,4,8589934592,1,0] -val_175 hdfs://### HDFS PATH ### 4150 [1,2,4,8589934592,1,0] -val_175 hdfs://### HDFS PATH ### 5176 [1,2,4,8589934592,1,0] -val_176 hdfs://### HDFS PATH ### 1428 [1,2,4,8589934592,1,0] -val_176 hdfs://### HDFS PATH ### 1556 [1,2,4,8589934592,1,0] -val_177 hdfs://### HDFS PATH ### 3036 [1,2,4,8589934592,1,0] -val_178 hdfs://### HDFS PATH ### 4938 [1,2,4,8589934592,1,0] -val_179 hdfs://### HDFS PATH ### 2006 [1,2,4,8589934592,1,0] -val_179 hdfs://### HDFS PATH ### 2674 [1,2,4,8589934592,1,0] -val_18 hdfs://### HDFS PATH ### 5340 [1,2,4,8589934592,1,0] -val_18 hdfs://### HDFS PATH ### 5514 [1,2,4,8589934592,1,0] -val_180 hdfs://### HDFS PATH ### 1696 [1,2,4,8589934592,1,0] -val_181 hdfs://### HDFS PATH ### 1742 [1,2,4,8589934592,1,0] -val_183 hdfs://### HDFS PATH ### 5536 [1,2,4,8589934592,1,0] -val_186 hdfs://### HDFS PATH ### 5466 [1,2,4,8589934592,1,0] -val_187 hdfs://### HDFS PATH ### 1416 [1,2,4,8589934592,1,0] -val_187 hdfs://### HDFS PATH ### 2492 [1,2,4,8589934592,1,0] -val_187 hdfs://### HDFS PATH ### 4516 [1,2,4,8589934592,1,0] -val_189 hdfs://### HDFS PATH ### 5188 [1,2,4,8589934592,1,0] -val_19 hdfs://### HDFS PATH ### 2824 [1,2,4,8589934592,1,0] -val_190 hdfs://### HDFS PATH ### 4244 [1,2,4,8589934592,1,0] -val_191 hdfs://### HDFS PATH ### 2192 [1,2,4,8589934592,1,0] -val_191 hdfs://### HDFS PATH ### 3852 [1,2,4,8589934592,1,0] -val_192 hdfs://### HDFS PATH ### 1392 [1,2,4,8589934592,1,0] -val_193 hdfs://### HDFS PATH ### 126 [1,2,4,8589934592,1,0] -val_193 hdfs://### HDFS PATH ### 4078 [1,2,4,8589934592,1,0] -val_193 hdfs://### HDFS PATH ### 514 [1,2,4,8589934592,1,0] -val_194 hdfs://### HDFS PATH ### 5684 [1,2,4,8589934592,1,0] -val_195 hdfs://### HDFS PATH ### 3286 [1,2,4,8589934592,1,0] -val_195 hdfs://### HDFS PATH ### 886 [1,2,4,8589934592,1,0] -val_196 hdfs://### HDFS PATH ### 2410 [1,2,4,8589934592,1,0] -val_197 hdfs://### HDFS PATH ### 2108 [1,2,4,8589934592,1,0] -val_197 hdfs://### HDFS PATH ### 2480 [1,2,4,8589934592,1,0] -val_199 hdfs://### HDFS PATH ### 2180 [1,2,4,8589934592,1,0] -val_199 
hdfs://### HDFS PATH ### 4806 [1,2,4,8589934592,1,0] -val_199 hdfs://### HDFS PATH ### 646 [1,2,4,8589934592,1,0] -val_2 hdfs://### HDFS PATH ### 4004 [1,2,4,8589934592,1,0] -val_20 hdfs://### HDFS PATH ### 1118 [1,2,4,8589934592,1,0] -val_200 hdfs://### HDFS PATH ### 4698 [1,2,4,8589934592,1,0] -val_200 hdfs://### HDFS PATH ### 5790 [1,2,4,8589934592,1,0] -val_201 hdfs://### HDFS PATH ### 4384 [1,2,4,8589934592,1,0] -val_202 hdfs://### HDFS PATH ### 3932 [1,2,4,8589934592,1,0] -val_203 hdfs://### HDFS PATH ### 4314 [1,2,4,8589934592,1,0] -val_203 hdfs://### HDFS PATH ### 944 [1,2,4,8589934592,1,0] -val_205 hdfs://### HDFS PATH ### 1046 [1,2,4,8589934592,1,0] -val_205 hdfs://### HDFS PATH ### 2272 [1,2,4,8589934592,1,0] -val_207 hdfs://### HDFS PATH ### 5022 [1,2,4,8589934592,1,0] -val_207 hdfs://### HDFS PATH ### 634 [1,2,4,8589934592,1,0] -val_208 hdfs://### HDFS PATH ### 1272 [1,2,4,8589934592,1,0] -val_208 hdfs://### HDFS PATH ### 1948 [1,2,4,8589934592,1,0] -val_208 hdfs://### HDFS PATH ### 670 [1,2,4,8589934592,1,0] -val_209 hdfs://### HDFS PATH ### 3504 [1,2,4,8589934592,1,0] -val_209 hdfs://### HDFS PATH ### 374 [1,2,4,8589934592,1,0] -val_213 hdfs://### HDFS PATH ### 1508 [1,2,4,8589934592,1,0] -val_213 hdfs://### HDFS PATH ### 220 [1,2,4,8589934592,1,0] -val_214 hdfs://### HDFS PATH ### 5116 [1,2,4,8589934592,1,0] -val_216 hdfs://### HDFS PATH ### 1520 [1,2,4,8589934592,1,0] -val_216 hdfs://### HDFS PATH ### 3650 [1,2,4,8589934592,1,0] -val_217 hdfs://### HDFS PATH ### 1860 [1,2,4,8589934592,1,0] -val_217 hdfs://### HDFS PATH ### 4396 [1,2,4,8589934592,1,0] -val_218 hdfs://### HDFS PATH ### 3446 [1,2,4,8589934592,1,0] -val_219 hdfs://### HDFS PATH ### 3710 [1,2,4,8589934592,1,0] -val_219 hdfs://### HDFS PATH ### 478 [1,2,4,8589934592,1,0] -val_221 hdfs://### HDFS PATH ### 1164 [1,2,4,8589934592,1,0] -val_221 hdfs://### HDFS PATH ### 1580 [1,2,4,8589934592,1,0] -val_222 hdfs://### HDFS PATH ### 5720 [1,2,4,8589934592,1,0] -val_223 hdfs://### HDFS PATH ### 3398 [1,2,4,8589934592,1,0] -val_223 hdfs://### HDFS PATH ### 3758 [1,2,4,8589934592,1,0] -val_224 hdfs://### HDFS PATH ### 174 [1,2,4,8589934592,1,0] -val_224 hdfs://### HDFS PATH ### 2892 [1,2,4,8589934592,1,0] -val_226 hdfs://### HDFS PATH ### 3048 [1,2,4,8589934592,1,0] -val_228 hdfs://### HDFS PATH ### 3458 [1,2,4,8589934592,1,0] -val_229 hdfs://### HDFS PATH ### 3202 [1,2,4,8589934592,1,0] -val_229 hdfs://### HDFS PATH ### 3956 [1,2,4,8589934592,1,0] -val_230 hdfs://### HDFS PATH ### 1730 [1,2,4,8589934592,1,0] -val_230 hdfs://### HDFS PATH ### 1936 [1,2,4,8589934592,1,0] -val_230 hdfs://### HDFS PATH ### 2260 [1,2,4,8589934592,1,0] -val_230 hdfs://### HDFS PATH ### 3580 [1,2,4,8589934592,1,0] -val_230 hdfs://### HDFS PATH ### 4914 [1,2,4,8589934592,1,0] -val_233 hdfs://### HDFS PATH ### 3214 [1,2,4,8589934592,1,0] -val_233 hdfs://### HDFS PATH ### 5140 [1,2,4,8589934592,1,0] -val_235 hdfs://### HDFS PATH ### 4046 [1,2,4,8589934592,1,0] -val_237 hdfs://### HDFS PATH ### 4722 [1,2,4,8589934592,1,0] -val_237 hdfs://### HDFS PATH ### 574 [1,2,4,8589934592,1,0] -val_238 hdfs://### HDFS PATH ### 0 [1,2,4,8589934592,1,0] -val_238 hdfs://### HDFS PATH ### 2746 [1,2,4,8589934592,1,0] -val_239 hdfs://### HDFS PATH ### 1496 [1,2,4,8589934592,1,0] -val_239 hdfs://### HDFS PATH ### 3722 [1,2,4,8589934592,1,0] -val_24 hdfs://### HDFS PATH ### 1972 [1,2,4,8589934592,1,0] -val_24 hdfs://### HDFS PATH ### 4594 [1,2,4,8589934592,1,0] -val_241 hdfs://### HDFS PATH ### 1662 [1,2,4,8589934592,1,0] -val_242 hdfs://### HDFS PATH ### 2940 
[1,2,4,8589934592,1,0] -val_242 hdfs://### HDFS PATH ### 3012 [1,2,4,8589934592,1,0] -val_244 hdfs://### HDFS PATH ### 3872 [1,2,4,8589934592,1,0] -val_247 hdfs://### HDFS PATH ### 718 [1,2,4,8589934592,1,0] -val_248 hdfs://### HDFS PATH ### 4758 [1,2,4,8589934592,1,0] -val_249 hdfs://### HDFS PATH ### 5034 [1,2,4,8589934592,1,0] -val_252 hdfs://### HDFS PATH ### 454 [1,2,4,8589934592,1,0] -val_255 hdfs://### HDFS PATH ### 4616 [1,2,4,8589934592,1,0] -val_255 hdfs://### HDFS PATH ### 68 [1,2,4,8589934592,1,0] -val_256 hdfs://### HDFS PATH ### 3770 [1,2,4,8589934592,1,0] -val_256 hdfs://### HDFS PATH ### 5272 [1,2,4,8589934592,1,0] -val_257 hdfs://### HDFS PATH ### 4208 [1,2,4,8589934592,1,0] -val_258 hdfs://### HDFS PATH ### 4292 [1,2,4,8589934592,1,0] -val_26 hdfs://### HDFS PATH ### 2226 [1,2,4,8589934592,1,0] -val_26 hdfs://### HDFS PATH ### 5284 [1,2,4,8589934592,1,0] -val_260 hdfs://### HDFS PATH ### 1764 [1,2,4,8589934592,1,0] -val_262 hdfs://### HDFS PATH ### 4326 [1,2,4,8589934592,1,0] -val_263 hdfs://### HDFS PATH ### 3782 [1,2,4,8589934592,1,0] -val_265 hdfs://### HDFS PATH ### 114 [1,2,4,8589934592,1,0] -val_265 hdfs://### HDFS PATH ### 5046 [1,2,4,8589934592,1,0] -val_266 hdfs://### HDFS PATH ### 814 [1,2,4,8589934592,1,0] -val_27 hdfs://### HDFS PATH ### 34 [1,2,4,8589934592,1,0] -val_272 hdfs://### HDFS PATH ### 1836 [1,2,4,8589934592,1,0] -val_272 hdfs://### HDFS PATH ### 2976 [1,2,4,8589934592,1,0] -val_273 hdfs://### HDFS PATH ### 162 [1,2,4,8589934592,1,0] -val_273 hdfs://### HDFS PATH ### 2868 [1,2,4,8589934592,1,0] -val_273 hdfs://### HDFS PATH ### 5524 [1,2,4,8589934592,1,0] -val_274 hdfs://### HDFS PATH ### 3698 [1,2,4,8589934592,1,0] -val_275 hdfs://### HDFS PATH ### 1638 [1,2,4,8589934592,1,0] -val_277 hdfs://### HDFS PATH ### 1260 [1,2,4,8589934592,1,0] -val_277 hdfs://### HDFS PATH ### 2856 [1,2,4,8589934592,1,0] -val_277 hdfs://### HDFS PATH ### 362 [1,2,4,8589934592,1,0] -val_277 hdfs://### HDFS PATH ### 4902 [1,2,4,8589934592,1,0] -val_278 hdfs://### HDFS PATH ### 1544 [1,2,4,8589934592,1,0] -val_278 hdfs://### HDFS PATH ### 80 [1,2,4,8589934592,1,0] -val_28 hdfs://### HDFS PATH ### 5616 [1,2,4,8589934592,1,0] -val_280 hdfs://### HDFS PATH ### 1226 [1,2,4,8589934592,1,0] -val_280 hdfs://### HDFS PATH ### 3992 [1,2,4,8589934592,1,0] -val_281 hdfs://### HDFS PATH ### 350 [1,2,4,8589934592,1,0] -val_281 hdfs://### HDFS PATH ### 5548 [1,2,4,8589934592,1,0] -val_282 hdfs://### HDFS PATH ### 2468 [1,2,4,8589934592,1,0] -val_282 hdfs://### HDFS PATH ### 2722 [1,2,4,8589934592,1,0] -val_283 hdfs://### HDFS PATH ### 4022 [1,2,4,8589934592,1,0] -val_284 hdfs://### HDFS PATH ### 1708 [1,2,4,8589934592,1,0] -val_285 hdfs://### HDFS PATH ### 5478 [1,2,4,8589934592,1,0] -val_286 hdfs://### HDFS PATH ### 1404 [1,2,4,8589934592,1,0] -val_287 hdfs://### HDFS PATH ### 490 [1,2,4,8589934592,1,0] -val_288 hdfs://### HDFS PATH ### 2422 [1,2,4,8589934592,1,0] -val_288 hdfs://### HDFS PATH ### 3840 [1,2,4,8589934592,1,0] -val_289 hdfs://### HDFS PATH ### 1568 [1,2,4,8589934592,1,0] -val_291 hdfs://### HDFS PATH ### 4582 [1,2,4,8589934592,1,0] -val_292 hdfs://### HDFS PATH ### 466 [1,2,4,8589934592,1,0] -val_296 hdfs://### HDFS PATH ### 3626 [1,2,4,8589934592,1,0] -val_298 hdfs://### HDFS PATH ### 2168 [1,2,4,8589934592,1,0] -val_298 hdfs://### HDFS PATH ### 4456 [1,2,4,8589934592,1,0] -val_298 hdfs://### HDFS PATH ### 5386 [1,2,4,8589934592,1,0] -val_30 hdfs://### HDFS PATH ### 3494 [1,2,4,8589934592,1,0] -val_302 hdfs://### HDFS PATH ### 1034 [1,2,4,8589934592,1,0] -val_305 
hdfs://### HDFS PATH ### 4782 [1,2,4,8589934592,1,0] -val_306 hdfs://### HDFS PATH ### 2880 [1,2,4,8589934592,1,0] -val_307 hdfs://### HDFS PATH ### 2812 [1,2,4,8589934592,1,0] -val_307 hdfs://### HDFS PATH ### 5672 [1,2,4,8589934592,1,0] -val_308 hdfs://### HDFS PATH ### 2388 [1,2,4,8589934592,1,0] -val_309 hdfs://### HDFS PATH ### 2904 [1,2,4,8589934592,1,0] -val_309 hdfs://### HDFS PATH ### 790 [1,2,4,8589934592,1,0] -val_310 hdfs://### HDFS PATH ### 4962 [1,2,4,8589934592,1,0] -val_311 hdfs://### HDFS PATH ### 1000 [1,2,4,8589934592,1,0] -val_311 hdfs://### HDFS PATH ### 1626 [1,2,4,8589934592,1,0] -val_311 hdfs://### HDFS PATH ### 22 [1,2,4,8589934592,1,0] -val_315 hdfs://### HDFS PATH ### 5594 [1,2,4,8589934592,1,0] -val_316 hdfs://### HDFS PATH ### 1012 [1,2,4,8589934592,1,0] -val_316 hdfs://### HDFS PATH ### 2576 [1,2,4,8589934592,1,0] -val_316 hdfs://### HDFS PATH ### 3944 [1,2,4,8589934592,1,0] -val_317 hdfs://### HDFS PATH ### 3104 [1,2,4,8589934592,1,0] -val_317 hdfs://### HDFS PATH ### 4974 [1,2,4,8589934592,1,0] -val_318 hdfs://### HDFS PATH ### 1602 [1,2,4,8589934592,1,0] -val_318 hdfs://### HDFS PATH ### 2504 [1,2,4,8589934592,1,0] -val_318 hdfs://### HDFS PATH ### 2516 [1,2,4,8589934592,1,0] -val_321 hdfs://### HDFS PATH ### 3308 [1,2,4,8589934592,1,0] -val_321 hdfs://### HDFS PATH ### 4090 [1,2,4,8589934592,1,0] -val_322 hdfs://### HDFS PATH ### 2096 [1,2,4,8589934592,1,0] -val_322 hdfs://### HDFS PATH ### 3250 [1,2,4,8589934592,1,0] -val_323 hdfs://### HDFS PATH ### 4878 [1,2,4,8589934592,1,0] -val_325 hdfs://### HDFS PATH ### 4890 [1,2,4,8589934592,1,0] -val_325 hdfs://### HDFS PATH ### 862 [1,2,4,8589934592,1,0] -val_327 hdfs://### HDFS PATH ### 2248 [1,2,4,8589934592,1,0] -val_327 hdfs://### HDFS PATH ### 2928 [1,2,4,8589934592,1,0] -val_327 hdfs://### HDFS PATH ### 338 [1,2,4,8589934592,1,0] -val_33 hdfs://### HDFS PATH ### 3592 [1,2,4,8589934592,1,0] -val_331 hdfs://### HDFS PATH ### 2988 [1,2,4,8589934592,1,0] -val_331 hdfs://### HDFS PATH ### 4034 [1,2,4,8589934592,1,0] -val_332 hdfs://### HDFS PATH ### 1614 [1,2,4,8589934592,1,0] -val_333 hdfs://### HDFS PATH ### 1684 [1,2,4,8589934592,1,0] -val_333 hdfs://### HDFS PATH ### 4986 [1,2,4,8589934592,1,0] -val_335 hdfs://### HDFS PATH ### 4102 [1,2,4,8589934592,1,0] -val_336 hdfs://### HDFS PATH ### 3148 [1,2,4,8589934592,1,0] -val_338 hdfs://### HDFS PATH ### 526 [1,2,4,8589934592,1,0] -val_339 hdfs://### HDFS PATH ### 956 [1,2,4,8589934592,1,0] -val_34 hdfs://### HDFS PATH ### 3192 [1,2,4,8589934592,1,0] -val_341 hdfs://### HDFS PATH ### 5406 [1,2,4,8589934592,1,0] -val_342 hdfs://### HDFS PATH ### 3558 [1,2,4,8589934592,1,0] -val_342 hdfs://### HDFS PATH ### 838 [1,2,4,8589934592,1,0] -val_344 hdfs://### HDFS PATH ### 3674 [1,2,4,8589934592,1,0] -val_344 hdfs://### HDFS PATH ### 5560 [1,2,4,8589934592,1,0] -val_345 hdfs://### HDFS PATH ### 1082 [1,2,4,8589934592,1,0] -val_348 hdfs://### HDFS PATH ### 1882 [1,2,4,8589934592,1,0] -val_348 hdfs://### HDFS PATH ### 1960 [1,2,4,8589934592,1,0] -val_348 hdfs://### HDFS PATH ### 4338 [1,2,4,8589934592,1,0] -val_348 hdfs://### HDFS PATH ### 5490 [1,2,4,8589934592,1,0] -val_348 hdfs://### HDFS PATH ### 5660 [1,2,4,8589934592,1,0] -val_35 hdfs://### HDFS PATH ### 1238 [1,2,4,8589934592,1,0] -val_35 hdfs://### HDFS PATH ### 3138 [1,2,4,8589934592,1,0] -val_35 hdfs://### HDFS PATH ### 4012 [1,2,4,8589934592,1,0] -val_351 hdfs://### HDFS PATH ### 4604 [1,2,4,8589934592,1,0] -val_353 hdfs://### HDFS PATH ### 1812 [1,2,4,8589934592,1,0] -val_353 hdfs://### HDFS PATH ### 5092 
[1,2,4,8589934592,1,0] -val_356 hdfs://### HDFS PATH ### 1284 [1,2,4,8589934592,1,0] -val_360 hdfs://### HDFS PATH ### 4746 [1,2,4,8589934592,1,0] -val_362 hdfs://### HDFS PATH ### 5454 [1,2,4,8589934592,1,0] -val_364 hdfs://### HDFS PATH ### 2662 [1,2,4,8589934592,1,0] -val_365 hdfs://### HDFS PATH ### 802 [1,2,4,8589934592,1,0] -val_366 hdfs://### HDFS PATH ### 4138 [1,2,4,8589934592,1,0] -val_367 hdfs://### HDFS PATH ### 3662 [1,2,4,8589934592,1,0] -val_367 hdfs://### HDFS PATH ### 850 [1,2,4,8589934592,1,0] -val_368 hdfs://### HDFS PATH ### 3602 [1,2,4,8589934592,1,0] -val_369 hdfs://### HDFS PATH ### 186 [1,2,4,8589934592,1,0] -val_369 hdfs://### HDFS PATH ### 2564 [1,2,4,8589934592,1,0] -val_369 hdfs://### HDFS PATH ### 2952 [1,2,4,8589934592,1,0] -val_37 hdfs://### HDFS PATH ### 328 [1,2,4,8589934592,1,0] -val_37 hdfs://### HDFS PATH ### 5626 [1,2,4,8589934592,1,0] -val_373 hdfs://### HDFS PATH ### 1824 [1,2,4,8589934592,1,0] -val_374 hdfs://### HDFS PATH ### 268 [1,2,4,8589934592,1,0] -val_375 hdfs://### HDFS PATH ### 5212 [1,2,4,8589934592,1,0] -val_377 hdfs://### HDFS PATH ### 766 [1,2,4,8589934592,1,0] -val_378 hdfs://### HDFS PATH ### 1152 [1,2,4,8589934592,1,0] -val_379 hdfs://### HDFS PATH ### 5328 [1,2,4,8589934592,1,0] -val_382 hdfs://### HDFS PATH ### 1320 [1,2,4,8589934592,1,0] -val_382 hdfs://### HDFS PATH ### 4528 [1,2,4,8589934592,1,0] -val_384 hdfs://### HDFS PATH ### 1788 [1,2,4,8589934592,1,0] -val_384 hdfs://### HDFS PATH ### 5260 [1,2,4,8589934592,1,0] -val_384 hdfs://### HDFS PATH ### 5316 [1,2,4,8589934592,1,0] -val_386 hdfs://### HDFS PATH ### 1356 [1,2,4,8589934592,1,0] -val_389 hdfs://### HDFS PATH ### 2916 [1,2,4,8589934592,1,0] -val_392 hdfs://### HDFS PATH ### 2964 [1,2,4,8589934592,1,0] -val_393 hdfs://### HDFS PATH ### 2132 [1,2,4,8589934592,1,0] -val_394 hdfs://### HDFS PATH ### 562 [1,2,4,8589934592,1,0] -val_395 hdfs://### HDFS PATH ### 2710 [1,2,4,8589934592,1,0] -val_395 hdfs://### HDFS PATH ### 3116 [1,2,4,8589934592,1,0] -val_396 hdfs://### HDFS PATH ### 3092 [1,2,4,8589934592,1,0] -val_396 hdfs://### HDFS PATH ### 4372 [1,2,4,8589934592,1,0] -val_396 hdfs://### HDFS PATH ### 706 [1,2,4,8589934592,1,0] -val_397 hdfs://### HDFS PATH ### 4558 [1,2,4,8589934592,1,0] -val_397 hdfs://### HDFS PATH ### 778 [1,2,4,8589934592,1,0] -val_399 hdfs://### HDFS PATH ### 1296 [1,2,4,8589934592,1,0] -val_399 hdfs://### HDFS PATH ### 694 [1,2,4,8589934592,1,0] -val_4 hdfs://### HDFS PATH ### 1218 [1,2,4,8589934592,1,0] -val_400 hdfs://### HDFS PATH ### 5778 [1,2,4,8589934592,1,0] -val_401 hdfs://### HDFS PATH ### 138 [1,2,4,8589934592,1,0] -val_401 hdfs://### HDFS PATH ### 3000 [1,2,4,8589934592,1,0] -val_401 hdfs://### HDFS PATH ### 3828 [1,2,4,8589934592,1,0] -val_401 hdfs://### HDFS PATH ### 4268 [1,2,4,8589934592,1,0] -val_401 hdfs://### HDFS PATH ### 5224 [1,2,4,8589934592,1,0] -val_402 hdfs://### HDFS PATH ### 3080 [1,2,4,8589934592,1,0] -val_403 hdfs://### HDFS PATH ### 406 [1,2,4,8589934592,1,0] -val_403 hdfs://### HDFS PATH ### 4162 [1,2,4,8589934592,1,0] -val_403 hdfs://### HDFS PATH ### 5766 [1,2,4,8589934592,1,0] -val_404 hdfs://### HDFS PATH ### 1776 [1,2,4,8589934592,1,0] -val_404 hdfs://### HDFS PATH ### 2318 [1,2,4,8589934592,1,0] -val_406 hdfs://### HDFS PATH ### 244 [1,2,4,8589934592,1,0] -val_406 hdfs://### HDFS PATH ### 4220 [1,2,4,8589934592,1,0] -val_406 hdfs://### HDFS PATH ### 4256 [1,2,4,8589934592,1,0] -val_406 hdfs://### HDFS PATH ### 5152 [1,2,4,8589934592,1,0] -val_407 hdfs://### HDFS PATH ### 5248 [1,2,4,8589934592,1,0] -val_409 
hdfs://### HDFS PATH ### 2528 [1,2,4,8589934592,1,0] -val_409 hdfs://### HDFS PATH ### 4232 [1,2,4,8589934592,1,0] -val_409 hdfs://### HDFS PATH ### 56 [1,2,4,8589934592,1,0] -val_41 hdfs://### HDFS PATH ### 3388 [1,2,4,8589934592,1,0] -val_411 hdfs://### HDFS PATH ### 1924 [1,2,4,8589934592,1,0] -val_413 hdfs://### HDFS PATH ### 2600 [1,2,4,8589934592,1,0] -val_413 hdfs://### HDFS PATH ### 610 [1,2,4,8589934592,1,0] -val_414 hdfs://### HDFS PATH ### 4686 [1,2,4,8589934592,1,0] -val_414 hdfs://### HDFS PATH ### 5696 [1,2,4,8589934592,1,0] -val_417 hdfs://### HDFS PATH ### 430 [1,2,4,8589934592,1,0] -val_417 hdfs://### HDFS PATH ### 4794 [1,2,4,8589934592,1,0] -val_417 hdfs://### HDFS PATH ### 730 [1,2,4,8589934592,1,0] -val_418 hdfs://### HDFS PATH ### 2204 [1,2,4,8589934592,1,0] -val_419 hdfs://### HDFS PATH ### 2758 [1,2,4,8589934592,1,0] -val_42 hdfs://### HDFS PATH ### 2030 [1,2,4,8589934592,1,0] -val_42 hdfs://### HDFS PATH ### 3298 [1,2,4,8589934592,1,0] -val_421 hdfs://### HDFS PATH ### 5236 [1,2,4,8589934592,1,0] -val_424 hdfs://### HDFS PATH ### 4350 [1,2,4,8589934592,1,0] -val_424 hdfs://### HDFS PATH ### 4504 [1,2,4,8589934592,1,0] -val_427 hdfs://### HDFS PATH ### 1248 [1,2,4,8589934592,1,0] -val_429 hdfs://### HDFS PATH ### 256 [1,2,4,8589934592,1,0] -val_429 hdfs://### HDFS PATH ### 4842 [1,2,4,8589934592,1,0] -val_43 hdfs://### HDFS PATH ### 2330 [1,2,4,8589934592,1,0] -val_430 hdfs://### HDFS PATH ### 1532 [1,2,4,8589934592,1,0] -val_430 hdfs://### HDFS PATH ### 3320 [1,2,4,8589934592,1,0] -val_430 hdfs://### HDFS PATH ### 442 [1,2,4,8589934592,1,0] -val_431 hdfs://### HDFS PATH ### 1994 [1,2,4,8589934592,1,0] -val_431 hdfs://### HDFS PATH ### 4420 [1,2,4,8589934592,1,0] -val_431 hdfs://### HDFS PATH ### 4480 [1,2,4,8589934592,1,0] -val_432 hdfs://### HDFS PATH ### 3920 [1,2,4,8589934592,1,0] -val_435 hdfs://### HDFS PATH ### 2834 [1,2,4,8589934592,1,0] -val_436 hdfs://### HDFS PATH ### 2340 [1,2,4,8589934592,1,0] -val_437 hdfs://### HDFS PATH ### 1368 [1,2,4,8589934592,1,0] -val_438 hdfs://### HDFS PATH ### 1070 [1,2,4,8589934592,1,0] -val_438 hdfs://### HDFS PATH ### 3884 [1,2,4,8589934592,1,0] -val_438 hdfs://### HDFS PATH ### 4662 [1,2,4,8589934592,1,0] -val_439 hdfs://### HDFS PATH ### 4734 [1,2,4,8589934592,1,0] -val_439 hdfs://### HDFS PATH ### 826 [1,2,4,8589934592,1,0] -val_44 hdfs://### HDFS PATH ### 4068 [1,2,4,8589934592,1,0] -val_443 hdfs://### HDFS PATH ### 4866 [1,2,4,8589934592,1,0] -val_444 hdfs://### HDFS PATH ### 4818 [1,2,4,8589934592,1,0] -val_446 hdfs://### HDFS PATH ### 538 [1,2,4,8589934592,1,0] -val_448 hdfs://### HDFS PATH ### 5636 [1,2,4,8589934592,1,0] -val_449 hdfs://### HDFS PATH ### 3434 [1,2,4,8589934592,1,0] -val_452 hdfs://### HDFS PATH ### 3024 [1,2,4,8589934592,1,0] -val_453 hdfs://### HDFS PATH ### 3482 [1,2,4,8589934592,1,0] -val_454 hdfs://### HDFS PATH ### 2144 [1,2,4,8589934592,1,0] -val_454 hdfs://### HDFS PATH ### 4432 [1,2,4,8589934592,1,0] -val_454 hdfs://### HDFS PATH ### 5200 [1,2,4,8589934592,1,0] -val_455 hdfs://### HDFS PATH ### 976 [1,2,4,8589934592,1,0] -val_457 hdfs://### HDFS PATH ### 2446 [1,2,4,8589934592,1,0] -val_458 hdfs://### HDFS PATH ### 3356 [1,2,4,8589934592,1,0] -val_458 hdfs://### HDFS PATH ### 5442 [1,2,4,8589934592,1,0] -val_459 hdfs://### HDFS PATH ### 1450 [1,2,4,8589934592,1,0] -val_459 hdfs://### HDFS PATH ### 550 [1,2,4,8589934592,1,0] -val_460 hdfs://### HDFS PATH ### 5010 [1,2,4,8589934592,1,0] -val_462 hdfs://### HDFS PATH ### 5128 [1,2,4,8589934592,1,0] -val_462 hdfs://### HDFS PATH ### 5350 
[1,2,4,8589934592,1,0] -val_463 hdfs://### HDFS PATH ### 1982 [1,2,4,8589934592,1,0] -val_463 hdfs://### HDFS PATH ### 3980 [1,2,4,8589934592,1,0] -val_466 hdfs://### HDFS PATH ### 1894 [1,2,4,8589934592,1,0] -val_466 hdfs://### HDFS PATH ### 4126 [1,2,4,8589934592,1,0] -val_466 hdfs://### HDFS PATH ### 658 [1,2,4,8589934592,1,0] -val_467 hdfs://### HDFS PATH ### 3908 [1,2,4,8589934592,1,0] -val_468 hdfs://### HDFS PATH ### 2120 [1,2,4,8589934592,1,0] -val_468 hdfs://### HDFS PATH ### 2376 [1,2,4,8589934592,1,0] -val_468 hdfs://### HDFS PATH ### 3526 [1,2,4,8589934592,1,0] -val_468 hdfs://### HDFS PATH ### 4950 [1,2,4,8589934592,1,0] -val_469 hdfs://### HDFS PATH ### 1380 [1,2,4,8589934592,1,0] -val_469 hdfs://### HDFS PATH ### 2364 [1,2,4,8589934592,1,0] -val_469 hdfs://### HDFS PATH ### 292 [1,2,4,8589934592,1,0] -val_469 hdfs://### HDFS PATH ### 3968 [1,2,4,8589934592,1,0] -val_469 hdfs://### HDFS PATH ### 5582 [1,2,4,8589934592,1,0] -val_47 hdfs://### HDFS PATH ### 1198 [1,2,4,8589934592,1,0] -val_470 hdfs://### HDFS PATH ### 2540 [1,2,4,8589934592,1,0] -val_472 hdfs://### HDFS PATH ### 3238 [1,2,4,8589934592,1,0] -val_475 hdfs://### HDFS PATH ### 898 [1,2,4,8589934592,1,0] -val_477 hdfs://### HDFS PATH ### 5708 [1,2,4,8589934592,1,0] -val_478 hdfs://### HDFS PATH ### 4444 [1,2,4,8589934592,1,0] -val_478 hdfs://### HDFS PATH ### 4926 [1,2,4,8589934592,1,0] -val_479 hdfs://### HDFS PATH ### 4770 [1,2,4,8589934592,1,0] -val_480 hdfs://### HDFS PATH ### 3816 [1,2,4,8589934592,1,0] -val_480 hdfs://### HDFS PATH ### 4570 [1,2,4,8589934592,1,0] -val_480 hdfs://### HDFS PATH ### 5058 [1,2,4,8589934592,1,0] -val_481 hdfs://### HDFS PATH ### 2434 [1,2,4,8589934592,1,0] -val_482 hdfs://### HDFS PATH ### 586 [1,2,4,8589934592,1,0] -val_483 hdfs://### HDFS PATH ### 4174 [1,2,4,8589934592,1,0] -val_484 hdfs://### HDFS PATH ### 102 [1,2,4,8589934592,1,0] -val_485 hdfs://### HDFS PATH ### 3734 [1,2,4,8589934592,1,0] -val_487 hdfs://### HDFS PATH ### 3804 [1,2,4,8589934592,1,0] -val_489 hdfs://### HDFS PATH ### 1128 [1,2,4,8589934592,1,0] -val_489 hdfs://### HDFS PATH ### 1800 [1,2,4,8589934592,1,0] -val_489 hdfs://### HDFS PATH ### 3344 [1,2,4,8589934592,1,0] -val_489 hdfs://### HDFS PATH ### 742 [1,2,4,8589934592,1,0] -val_490 hdfs://### HDFS PATH ### 2640 [1,2,4,8589934592,1,0] -val_491 hdfs://### HDFS PATH ### 4710 [1,2,4,8589934592,1,0] -val_492 hdfs://### HDFS PATH ### 3410 [1,2,4,8589934592,1,0] -val_492 hdfs://### HDFS PATH ### 5362 [1,2,4,8589934592,1,0] -val_493 hdfs://### HDFS PATH ### 4998 [1,2,4,8589934592,1,0] -val_494 hdfs://### HDFS PATH ### 622 [1,2,4,8589934592,1,0] -val_495 hdfs://### HDFS PATH ### 316 [1,2,4,8589934592,1,0] -val_496 hdfs://### HDFS PATH ### 2076 [1,2,4,8589934592,1,0] -val_497 hdfs://### HDFS PATH ### 3068 [1,2,4,8589934592,1,0] -val_498 hdfs://### HDFS PATH ### 1332 [1,2,4,8589934592,1,0] -val_498 hdfs://### HDFS PATH ### 3262 [1,2,4,8589934592,1,0] -val_498 hdfs://### HDFS PATH ### 5418 [1,2,4,8589934592,1,0] -val_5 hdfs://### HDFS PATH ### 3060 [1,2,4,8589934592,1,0] -val_5 hdfs://### HDFS PATH ### 3864 [1,2,4,8589934592,1,0] -val_5 hdfs://### HDFS PATH ### 4540 [1,2,4,8589934592,1,0] -val_51 hdfs://### HDFS PATH ### 1462 [1,2,4,8589934592,1,0] -val_51 hdfs://### HDFS PATH ### 2308 [1,2,4,8589934592,1,0] -val_53 hdfs://### HDFS PATH ### 4186 [1,2,4,8589934592,1,0] -val_54 hdfs://### HDFS PATH ### 1440 [1,2,4,8589934592,1,0] -val_57 hdfs://### HDFS PATH ### 1024 [1,2,4,8589934592,1,0] -val_58 hdfs://### HDFS PATH ### 1906 [1,2,4,8589934592,1,0] -val_58 hdfs://### 
HDFS PATH ### 3128 [1,2,4,8589934592,1,0] -val_64 hdfs://### HDFS PATH ### 3516 [1,2,4,8589934592,1,0] -val_65 hdfs://### HDFS PATH ### 1592 [1,2,4,8589934592,1,0] -val_66 hdfs://### HDFS PATH ### 198 [1,2,4,8589934592,1,0] -val_67 hdfs://### HDFS PATH ### 1754 [1,2,4,8589934592,1,0] -val_67 hdfs://### HDFS PATH ### 5306 [1,2,4,8589934592,1,0] -val_69 hdfs://### HDFS PATH ### 3570 [1,2,4,8589934592,1,0] -val_70 hdfs://### HDFS PATH ### 3794 [1,2,4,8589934592,1,0] -val_70 hdfs://### HDFS PATH ### 4548 [1,2,4,8589934592,1,0] -val_70 hdfs://### HDFS PATH ### 4640 [1,2,4,8589934592,1,0] -val_72 hdfs://### HDFS PATH ### 1208 [1,2,4,8589934592,1,0] -val_72 hdfs://### HDFS PATH ### 2792 [1,2,4,8589934592,1,0] -val_74 hdfs://### HDFS PATH ### 3548 [1,2,4,8589934592,1,0] -val_76 hdfs://### HDFS PATH ### 3378 [1,2,4,8589934592,1,0] -val_76 hdfs://### HDFS PATH ### 3538 [1,2,4,8589934592,1,0] -val_77 hdfs://### HDFS PATH ### 2622 [1,2,4,8589934592,1,0] -val_78 hdfs://### HDFS PATH ### 3368 [1,2,4,8589934592,1,0] -val_8 hdfs://### HDFS PATH ### 1916 [1,2,4,8589934592,1,0] -val_80 hdfs://### HDFS PATH ### 4058 [1,2,4,8589934592,1,0] -val_82 hdfs://### HDFS PATH ### 396 [1,2,4,8589934592,1,0] -val_83 hdfs://### HDFS PATH ### 1674 [1,2,4,8589934592,1,0] -val_83 hdfs://### HDFS PATH ### 5070 [1,2,4,8589934592,1,0] -val_84 hdfs://### HDFS PATH ### 1872 [1,2,4,8589934592,1,0] -val_84 hdfs://### HDFS PATH ### 5606 [1,2,4,8589934592,1,0] -val_85 hdfs://### HDFS PATH ### 2612 [1,2,4,8589934592,1,0] -val_86 hdfs://### HDFS PATH ### 12 [1,2,4,8589934592,1,0] -val_87 hdfs://### HDFS PATH ### 2652 [1,2,4,8589934592,1,0] -val_9 hdfs://### HDFS PATH ### 5398 [1,2,4,8589934592,1,0] -val_90 hdfs://### HDFS PATH ### 2802 [1,2,4,8589934592,1,0] -val_90 hdfs://### HDFS PATH ### 4304 [1,2,4,8589934592,1,0] -val_90 hdfs://### HDFS PATH ### 5744 [1,2,4,8589934592,1,0] -val_92 hdfs://### HDFS PATH ### 1176 [1,2,4,8589934592,1,0] -val_95 hdfs://### HDFS PATH ### 2400 [1,2,4,8589934592,1,0] -val_95 hdfs://### HDFS PATH ### 3160 [1,2,4,8589934592,1,0] -val_96 hdfs://### HDFS PATH ### 2216 [1,2,4,8589934592,1,0] -val_97 hdfs://### HDFS PATH ### 5572 [1,2,4,8589934592,1,0] -val_97 hdfs://### HDFS PATH ### 5802 [1,2,4,8589934592,1,0] -val_98 hdfs://### HDFS PATH ### 2458 [1,2,4,8589934592,1,0] -val_98 hdfs://### HDFS PATH ### 92 [1,2,4,8589934592,1,0] -PREHOOK: query: EXPLAIN -SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets` -FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__ - WHERE key = 0) a - JOIN - (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__ - WHERE value = "val_0") b - ON - a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT -EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN -SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets` -FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__ - WHERE key = 0) a - JOIN - (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__ - WHERE value = "val_0") b - ON - a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT -EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - 
Stage-0 depends on stages: Stage-2 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: default__src_src1_index__ - Statistics: Num rows: 500 Data size: 46311 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) = 0.0) and _bucketname is not null and _offset is not null) (type: boolean) - Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: bigint) - sort order: ++ - Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) - Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE - value expressions: _col2 (type: array) - TableScan - alias: default__src_src2_index__ - Statistics: Num rows: 500 Data size: 48311 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((value = 'val_0') and _bucketname is not null and _offset is not null) (type: boolean) - Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: bigint) - sort order: ++ - Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) - Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE - value expressions: _col2 (type: array) - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: string), _col1 (type: bigint) - 1 _col0 (type: string), _col1 (type: bigint) - outputColumnNames: _col0, _col1, _col2, _col5 - Statistics: Num rows: 275 Data size: 25470 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (not EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(_col2,_col5))) (type: boolean) - Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: bigint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: collect_set(_col1) - keys: _col0 (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: array) - Reduce Operator Tree: - Group By Operator - aggregations: collect_set(VALUE._col0) - keys: KEY._col0 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num 
rows: 69 Data size: 6390 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 69 Data size: 6390 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -#### A masked pattern was here #### -SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets` -FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__ - WHERE key = 0) a - JOIN - (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__ - WHERE value = "val_0") b - ON - a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT -EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src1_index__ -PREHOOK: Input: default@default__src_src2_index__ -#### A masked pattern was here #### -SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets` -FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__ - WHERE key = 0) a - JOIN - (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__ - WHERE value = "val_0") b - ON - a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT -EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src1_index__ -POSTHOOK: Input: default@default__src_src2_index__ -#### A masked pattern was here #### -PREHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0" -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0" -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: hdfs://### HDFS PATH ### -0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0" -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0" -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: hdfs://### HDFS PATH ### -0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: DROP INDEX src1_index ON src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: DROP INDEX src1_index ON src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: DROP INDEX src2_index ON src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: DROP INDEX src2_index ON src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src diff --git ql/src/test/results/clientpositive/index_bitmap_auto_partitioned.q.out ql/src/test/results/clientpositive/index_bitmap_auto_partitioned.q.out deleted file mode 100644 index 773e9f1abc..0000000000 --- ql/src/test/results/clientpositive/index_bitmap_auto_partitioned.q.out +++ /dev/null @@ -1,150 +0,0 @@ -PREHOOK: query: CREATE INDEX src_part_index ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: CREATE INDEX src_part_index ON TABLE srcpart(key) 
as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@srcpart -POSTHOOK: Output: default@default__srcpart_src_part_index__ -PREHOOK: query: ALTER INDEX src_part_index ON srcpart REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=11 -PREHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=12 -PREHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=11 -PREHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=12 -POSTHOOK: query: ALTER INDEX src_part_index ON srcpart REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=12 -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=12)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=12)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=11)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=11)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: 
Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=12)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=12)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: EXPLAIN SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09' -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09' -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-3 is a root stage - Stage-2 depends on stages: Stage-3 - Stage-1 depends on stages: Stage-2 - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - alias: default__srcpart_src_part_index__ - filterExpr: ((UDFToDouble(key) = 86.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean) - Filter Operator - predicate: ((UDFToDouble(key) = 86.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean) - Select Operator - expressions: _bucketname (type: string), _offset (type: bigint) - outputColumnNames: _bucketname, _offset - Group By Operator - aggregations: collect_set(_offset) - keys: _bucketname (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - value expressions: _col1 (type: array) - Reduce Operator Tree: - Group By Operator - aggregations: collect_set(VALUE._col0) - keys: KEY._col0 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1 - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-2 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: srcpart - filterExpr: ((UDFToDouble(key) = 86.0) and (ds = '2008-04-09')) (type: boolean) - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: 
-1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_src_part_index__ -PREHOOK: Input: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=11 -PREHOOK: Input: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=12 -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_src_part_index__ -POSTHOOK: Input: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=12 -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -86 val_86 -86 val_86 -PREHOOK: query: DROP INDEX src_part_index ON srcpart -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: DROP INDEX src_part_index ON srcpart -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@srcpart diff --git ql/src/test/results/clientpositive/index_bitmap_compression.q.out ql/src/test/results/clientpositive/index_bitmap_compression.q.out deleted file mode 100644 index 1f8e40abb0..0000000000 --- ql/src/test/results/clientpositive/index_bitmap_compression.q.out +++ /dev/null @@ -1,133 +0,0 @@ -PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index__ -PREHOOK: query: ALTER INDEX src_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src_index__ -POSTHOOK: query: ALTER INDEX src_index ON src REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index__ -POSTHOOK: Lineage: default__src_src_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__src_src_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-3 is a root stage - Stage-2 depends on stages: Stage-3 - Stage-1 depends on stages: Stage-2 - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - alias: default__src_src_index__ - filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean) - Filter Operator - predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0) and (not 
EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean) - Select Operator - expressions: _bucketname (type: string), _offset (type: bigint) - outputColumnNames: _bucketname, _offset - Group By Operator - aggregations: collect_set(_offset) - keys: _bucketname (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - value expressions: _col1 (type: array) - Reduce Operator Tree: - Group By Operator - aggregations: collect_set(VALUE._col0) - keys: KEY._col0 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1 - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-2 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: true - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src_index__ -POSTHOOK: Input: default@src -#### A masked pattern was here #### -82 val_82 -83 val_83 -83 val_83 -84 val_84 -84 val_84 -85 val_85 -86 val_86 -87 val_87 -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 -98 val_98 -PREHOOK: query: DROP INDEX src_index on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: DROP INDEX src_index on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src diff --git ql/src/test/results/clientpositive/index_bitmap_rc.q.out ql/src/test/results/clientpositive/index_bitmap_rc.q.out deleted file mode 100644 index 046442c12e..0000000000 --- ql/src/test/results/clientpositive/index_bitmap_rc.q.out +++ /dev/null @@ -1,349 +0,0 @@ -PREHOOK: query: CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@srcpart_rc -POSTHOOK: query: CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: 
database:default -POSTHOOK: Output: default@srcpart_rc -PREHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 11 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Output: default@srcpart_rc@ds=2008-04-08/hr=11 -POSTHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 11 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@srcpart_rc@ds=2008-04-08/hr=11 -POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-08,hr=11).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 12 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@srcpart_rc@ds=2008-04-08/hr=12 -POSTHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 12 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@srcpart_rc@ds=2008-04-08/hr=12 -POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-08,hr=12).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 11 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Output: default@srcpart_rc@ds=2008-04-09/hr=11 -POSTHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 11 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@srcpart_rc@ds=2008-04-09/hr=11 -POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-09,hr=11).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 12 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@srcpart_rc@ds=2008-04-09/hr=12 -POSTHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 12 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@srcpart_rc@ds=2008-04-09/hr=12 -POSTHOOK: Lineage: srcpart_rc 
PARTITION(ds=2008-04-09,hr=12).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: EXPLAIN -CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -POSTHOOK: query: EXPLAIN -CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - -PREHOOK: query: CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@srcpart_rc -POSTHOOK: query: CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__ -PREHOOK: query: ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@srcpart_rc -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12 -PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11 -PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12 -POSTHOOK: query: ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12 -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._bitmaps EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._offset SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._bitmaps EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE 
[(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._offset SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._bitmaps EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._offset SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._bitmaps EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._offset SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.ds = '2008-04-08' and x.hr = 11 -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -POSTHOOK: query: SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.ds = '2008-04-08' and x.hr = 11 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_rc_srcpart_rc_index__ -x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' GROUP BY `_bucketname` -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_rc_srcpart_rc_index__ -x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' GROUP BY `_bucketname` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -POSTHOOK: Input: 
default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -PREHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart_rc -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -100 val_100 -100 val_100 -100 val_100 -100 val_100 -#### A masked pattern was here #### -COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_rc_srcpart_rc_index__ -x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' and x.hr = 11 GROUP BY `_bucketname` -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_rc_srcpart_rc_index__ -x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' and x.hr = 11 GROUP BY `_bucketname` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -PREHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart_rc -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -100 val_100 -100 val_100 -PREHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart_rc -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -100 val_100 -100 val_100 -PREHOOK: query: DROP INDEX srcpart_rc_index on srcpart_rc -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@srcpart_rc -POSTHOOK: query: DROP INDEX srcpart_rc_index on srcpart_rc -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@srcpart_rc -PREHOOK: query: EXPLAIN -CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -POSTHOOK: query: EXPLAIN -CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - -PREHOOK: query: CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@srcpart_rc -POSTHOOK: query: CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD 
-POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__ -PREHOOK: query: ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@srcpart_rc -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12 -PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11 -PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12 -POSTHOOK: query: ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12 -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._bitmaps EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._offset SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._bitmaps EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._offset SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._bitmaps EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: 
default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._offset SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._bitmaps EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._offset SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.key = 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11 -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.key = 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_rc_srcpart_rc_index__ -WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND key=100 GROUP BY `_bucketname` -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11 -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_rc_srcpart_rc_index__ -WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND key=100 GROUP BY `_bucketname` -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -PREHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 -PREHOOK: 
type: QUERY -PREHOOK: Input: default@srcpart_rc -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -PREHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart_rc -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -PREHOOK: query: DROP INDEX srcpart_rc_index on srcpart_rc -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@srcpart_rc -POSTHOOK: query: DROP INDEX srcpart_rc_index on srcpart_rc -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@srcpart_rc -PREHOOK: query: DROP TABLE srcpart_rc -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@srcpart_rc -PREHOOK: Output: default@srcpart_rc -POSTHOOK: query: DROP TABLE srcpart_rc -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Output: default@srcpart_rc diff --git ql/src/test/results/clientpositive/index_compact.q.out ql/src/test/results/clientpositive/index_compact.q.out deleted file mode 100644 index 97d7bac00b..0000000000 --- ql/src/test/results/clientpositive/index_compact.q.out +++ /dev/null @@ -1,271 +0,0 @@ -PREHOOK: query: DROP INDEX srcpart_index_proj on srcpart -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: DROP INDEX srcpart_index_proj on srcpart -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@srcpart -PREHOOK: query: EXPLAIN -CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -POSTHOOK: query: EXPLAIN -CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - -PREHOOK: query: CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@srcpart -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__ -PREHOOK: query: ALTER INDEX srcpart_index_proj ON srcpart REBUILD 
-PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11 -PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12 -POSTHOOK: query: ALTER INDEX srcpart_index_proj ON srcpart REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12 -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] 
-PREHOOK: query: SELECT x.* FROM default__srcpart_srcpart_index_proj__ x WHERE x.ds = '2008-04-08' and x.hr = 11 -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__ -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -POSTHOOK: query: SELECT x.* FROM default__srcpart_srcpart_index_proj__ x WHERE x.ds = '2008-04-08' and x.hr = 11 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__ -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__ -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__ -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -100 val_100 -100 val_100 -100 val_100 -100 val_100 -#### A masked pattern was here #### -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__ -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__ -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -100 val_100 -100 val_100 -PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -100 val_100 -100 val_100 -PREHOOK: query: DROP INDEX srcpart_index_proj on srcpart -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: DROP INDEX srcpart_index_proj on srcpart -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@srcpart 
-PREHOOK: query: EXPLAIN -CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -POSTHOOK: query: EXPLAIN -CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - -PREHOOK: query: CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@srcpart -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__ -PREHOOK: query: ALTER INDEX srcpart_index_proj ON srcpart REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11 -PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12 -POSTHOOK: query: ALTER INDEX srcpart_index_proj ON srcpart REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12 -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11)._offsets 
EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT x.* FROM default__srcpart_srcpart_index_proj__ x -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__ -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11 -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT x.* FROM default__srcpart_srcpart_index_proj__ x -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__ -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__ -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11 -PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__ -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -100 
val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -PREHOOK: query: DROP INDEX srcpart_index_proj on srcpart -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@srcpart -POSTHOOK: query: DROP INDEX srcpart_index_proj on srcpart -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@srcpart diff --git ql/src/test/results/clientpositive/index_compact_1.q.out ql/src/test/results/clientpositive/index_compact_1.q.out deleted file mode 100644 index 7be9ada6aa..0000000000 --- ql/src/test/results/clientpositive/index_compact_1.q.out +++ /dev/null @@ -1,70 +0,0 @@ -PREHOOK: query: EXPLAIN -CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -POSTHOOK: query: EXPLAIN -CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - -PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index__ -PREHOOK: query: ALTER INDEX src_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src_index__ -POSTHOOK: query: ALTER INDEX src_index ON src REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index__ -POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__src_src_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT x.* FROM default__src_src_index__ x -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -#### A masked pattern was here #### -POSTHOOK: query: SELECT x.* FROM default__src_src_index__ x -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src_index__ -#### A masked pattern was here #### -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -#### A masked pattern was here #### -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src_index__ -#### A masked pattern was here #### -PREHOOK: query: SELECT key, value FROM src WHERE key=100 -PREHOOK: type: QUERY -PREHOOK: Input: 
default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM src WHERE key=100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -100 val_100 -100 val_100 -PREHOOK: query: SELECT key, value FROM src WHERE key=100 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM src WHERE key=100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -100 val_100 -100 val_100 -PREHOOK: query: DROP INDEX src_index on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: DROP INDEX src_index on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src diff --git ql/src/test/results/clientpositive/index_compact_2.q.out ql/src/test/results/clientpositive/index_compact_2.q.out deleted file mode 100644 index 28ba095ed1..0000000000 --- ql/src/test/results/clientpositive/index_compact_2.q.out +++ /dev/null @@ -1,317 +0,0 @@ -PREHOOK: query: CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@srcpart_rc -POSTHOOK: query: CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcpart_rc -PREHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 11 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Output: default@srcpart_rc@ds=2008-04-08/hr=11 -POSTHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 11 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@srcpart_rc@ds=2008-04-08/hr=11 -POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-08,hr=11).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 12 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@srcpart_rc@ds=2008-04-08/hr=12 -POSTHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 12 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@srcpart_rc@ds=2008-04-08/hr=12 -POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-08,hr=12).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 11 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart 
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Output: default@srcpart_rc@ds=2008-04-09/hr=11 -POSTHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 11 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@srcpart_rc@ds=2008-04-09/hr=11 -POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-09,hr=11).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 12 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@srcpart_rc@ds=2008-04-09/hr=12 -POSTHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 12 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@srcpart_rc@ds=2008-04-09/hr=12 -POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-09,hr=12).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@srcpart_rc -POSTHOOK: query: CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__ -PREHOOK: query: ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@srcpart_rc -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12 -PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11 -PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12 -POSTHOOK: query: ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12 -POSTHOOK: Lineage: 
default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._offsets EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._offsets EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._offsets EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._offsets EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.ds = '2008-04-08' and x.hr = 11 -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -POSTHOOK: query: SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.ds = '2008-04-08' and x.hr = 11 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -PREHOOK: query: SELECT key, value FROM 
srcpart_rc WHERE key=100 AND ds = '2008-04-08' -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart_rc -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -#### A masked pattern was here #### -100 val_100 -100 val_100 -100 val_100 -100 val_100 -#### A masked pattern was here #### -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -PREHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart_rc -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -100 val_100 -100 val_100 -PREHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart_rc -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -#### A masked pattern was here #### -100 val_100 -100 val_100 -PREHOOK: query: DROP INDEX srcpart_rc_index on srcpart_rc -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@srcpart_rc -POSTHOOK: query: DROP INDEX srcpart_rc_index on srcpart_rc -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@srcpart_rc -PREHOOK: query: EXPLAIN -CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -POSTHOOK: query: EXPLAIN -CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -STAGE DEPENDENCIES: - Stage-0 is a root stage - -STAGE PLANS: - Stage: Stage-0 - -PREHOOK: query: CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@srcpart_rc -POSTHOOK: query: CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__ -PREHOOK: query: ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@srcpart_rc -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12 -PREHOOK: Output: 
default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11 -PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12 -POSTHOOK: query: ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12 -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._offsets EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._offsets EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._offsets EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._offsets EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -PREHOOK: Input: 
default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11 -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -PREHOOK: type: QUERY -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11 -PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__ -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -PREHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart_rc -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -PREHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart_rc -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 -100 val_100 
-100 val_100 -100 val_100 -PREHOOK: query: DROP INDEX srcpart_rc_index on srcpart_rc -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@srcpart_rc -POSTHOOK: query: DROP INDEX srcpart_rc_index on srcpart_rc -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@srcpart_rc -PREHOOK: query: DROP TABLE srcpart_rc -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@srcpart_rc -PREHOOK: Output: default@srcpart_rc -POSTHOOK: query: DROP TABLE srcpart_rc -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@srcpart_rc -POSTHOOK: Output: default@srcpart_rc diff --git ql/src/test/results/clientpositive/index_compact_3.q.out ql/src/test/results/clientpositive/index_compact_3.q.out deleted file mode 100644 index 14a5927496..0000000000 --- ql/src/test/results/clientpositive/index_compact_3.q.out +++ /dev/null @@ -1,84 +0,0 @@ -PREHOOK: query: CREATE TABLE src_index_test_rc (key int, value string) STORED AS RCFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@src_index_test_rc -POSTHOOK: query: CREATE TABLE src_index_test_rc (key int, value string) STORED AS RCFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_index_test_rc -PREHOOK: query: INSERT OVERWRITE TABLE src_index_test_rc SELECT * FROM src -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@src_index_test_rc -POSTHOOK: query: INSERT OVERWRITE TABLE src_index_test_rc SELECT * FROM src -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@src_index_test_rc -POSTHOOK: Lineage: src_index_test_rc.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_index_test_rc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: CREATE INDEX src_index ON TABLE src_index_test_rc(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src_index_test_rc -POSTHOOK: query: CREATE INDEX src_index ON TABLE src_index_test_rc(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src_index_test_rc -POSTHOOK: Output: default@default__src_index_test_rc_src_index__ -PREHOOK: query: ALTER INDEX src_index ON src_index_test_rc REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src_index_test_rc -PREHOOK: Output: default@default__src_index_test_rc_src_index__ -POSTHOOK: query: ALTER INDEX src_index ON src_index_test_rc REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@src_index_test_rc -POSTHOOK: Output: default@default__src_index_test_rc_src_index__ -POSTHOOK: Lineage: default__src_index_test_rc_src_index__._bucketname SIMPLE [(src_index_test_rc)src_index_test_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__src_index_test_rc_src_index__._offsets EXPRESSION [(src_index_test_rc)src_index_test_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_index_test_rc_src_index__.key SIMPLE [(src_index_test_rc)src_index_test_rc.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: SELECT x.* FROM default__src_index_test_rc_src_index__ x -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_index_test_rc_src_index__ -#### A masked pattern was here #### -POSTHOOK: query: SELECT x.* FROM default__src_index_test_rc_src_index__ x -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_index_test_rc_src_index__ -#### A masked 
pattern was here #### -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_index_test_rc_src_index__ -#### A masked pattern was here #### -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_index_test_rc_src_index__ -#### A masked pattern was here #### -PREHOOK: query: SELECT key, value FROM src_index_test_rc WHERE key=100 -PREHOOK: type: QUERY -PREHOOK: Input: default@src_index_test_rc -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM src_index_test_rc WHERE key=100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_index_test_rc -#### A masked pattern was here #### -100 val_100 -100 val_100 -PREHOOK: query: SELECT key, value FROM src_index_test_rc WHERE key=100 -PREHOOK: type: QUERY -PREHOOK: Input: default@src_index_test_rc -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM src_index_test_rc WHERE key=100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_index_test_rc -#### A masked pattern was here #### -100 val_100 -100 val_100 -PREHOOK: query: DROP INDEX src_index on src_index_test_rc -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src_index_test_rc -POSTHOOK: query: DROP INDEX src_index on src_index_test_rc -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src_index_test_rc -PREHOOK: query: DROP TABLE src_index_test_rc -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@src_index_test_rc -PREHOOK: Output: default@src_index_test_rc -POSTHOOK: query: DROP TABLE src_index_test_rc -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@src_index_test_rc -POSTHOOK: Output: default@src_index_test_rc diff --git ql/src/test/results/clientpositive/index_compact_binary_search.q.out ql/src/test/results/clientpositive/index_compact_binary_search.q.out deleted file mode 100644 index dbbd9edaf3..0000000000 --- ql/src/test/results/clientpositive/index_compact_binary_search.q.out +++ /dev/null @@ -1,473 +0,0 @@ -PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index__ -PREHOOK: query: ALTER INDEX src_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src_index__ -POSTHOOK: query: ALTER INDEX src_index ON src REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index__ -POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__src_src_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT * FROM src WHERE key = '0' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: SELECT * FROM src WHERE key < '1' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: SELECT * FROM src WHERE key <= '0' -PREHOOK: type: QUERY -PREHOOK: Input: 
default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: SELECT * FROM src WHERE key > '8' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -86 val_86 -98 val_98 -82 val_82 -92 val_92 -83 val_83 -84 val_84 -96 val_96 -95 val_95 -98 val_98 -85 val_85 -87 val_87 -90 val_90 -95 val_95 -80 val_80 -90 val_90 -83 val_83 -9 val_9 -97 val_97 -84 val_84 -90 val_90 -97 val_97 -PREHOOK: query: SELECT * FROM src WHERE key >= '9' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -98 val_98 -92 val_92 -96 val_96 -95 val_95 -98 val_98 -90 val_90 -95 val_95 -90 val_90 -9 val_9 -97 val_97 -90 val_90 -97 val_97 -PREHOOK: query: DROP INDEX src_index ON src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -PREHOOK: query: ALTER INDEX src_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src_index__ -PREHOOK: query: SELECT * FROM src WHERE key = '0' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: SELECT * FROM src WHERE key < '1' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: SELECT * FROM src WHERE key <= '0' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: SELECT * FROM src WHERE key > '8' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -86 val_86 -98 val_98 -82 val_82 -92 val_92 -83 val_83 -84 val_84 -96 val_96 -95 val_95 -98 val_98 -85 val_85 -87 val_87 -90 val_90 -95 val_95 -80 val_80 -90 val_90 -83 val_83 -9 val_9 -97 val_97 -84 val_84 -90 val_90 -97 val_97 -PREHOOK: query: SELECT * FROM src WHERE key >= '9' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -98 val_98 -92 val_92 -96 val_96 -95 val_95 -98 val_98 -90 val_90 -95 val_95 -90 val_90 -9 val_9 -97 val_97 -90 val_90 -97 val_97 -PREHOOK: query: DROP INDEX src_index ON src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -PREHOOK: query: ALTER INDEX src_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src_index__ -PREHOOK: query: SELECT * FROM src WHERE key = '0' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: SELECT * FROM src WHERE key < '1' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: SELECT * FROM 
src WHERE key <= '0' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: SELECT * FROM src WHERE key > '8' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -86 val_86 -98 val_98 -82 val_82 -92 val_92 -83 val_83 -84 val_84 -96 val_96 -95 val_95 -98 val_98 -85 val_85 -87 val_87 -90 val_90 -95 val_95 -80 val_80 -90 val_90 -83 val_83 -9 val_9 -97 val_97 -84 val_84 -90 val_90 -97 val_97 -PREHOOK: query: SELECT * FROM src WHERE key >= '9' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -98 val_98 -92 val_92 -96 val_96 -95 val_95 -98 val_98 -90 val_90 -95 val_95 -90 val_90 -9 val_9 -97 val_97 -90 val_90 -97 val_97 -PREHOOK: query: DROP INDEX src_index ON src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -PREHOOK: query: ALTER INDEX src_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src_index__ -PREHOOK: query: SELECT * FROM src WHERE key = '0' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: SELECT * FROM src WHERE key < '1' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: SELECT * FROM src WHERE key <= '0' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: SELECT * FROM src WHERE key > '8' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -86 val_86 -98 val_98 -82 val_82 -92 val_92 -83 val_83 -84 val_84 -96 val_96 -95 val_95 -98 val_98 -85 val_85 -87 val_87 -90 val_90 -95 val_95 -80 val_80 -90 val_90 -83 val_83 -9 val_9 -97 val_97 -84 val_84 -90 val_90 -97 val_97 -PREHOOK: query: SELECT * FROM src WHERE key >= '9' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -98 val_98 -92 val_92 -96 val_96 -95 val_95 -98 val_98 -90 val_90 -95 val_95 -90 val_90 -9 val_9 -97 val_97 -90 val_90 -97 val_97 -PREHOOK: query: DROP INDEX src_index ON src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -PREHOOK: query: ALTER INDEX src_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src_index__ -PREHOOK: query: SELECT * FROM src WHERE key = '0' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: SELECT * FROM src WHERE key < '1' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### 
-0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: SELECT * FROM src WHERE key <= '0' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: SELECT * FROM src WHERE key > '8' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -86 val_86 -98 val_98 -82 val_82 -92 val_92 -83 val_83 -84 val_84 -96 val_96 -95 val_95 -98 val_98 -85 val_85 -87 val_87 -90 val_90 -95 val_95 -80 val_80 -90 val_90 -83 val_83 -9 val_9 -97 val_97 -84 val_84 -90 val_90 -97 val_97 -PREHOOK: query: SELECT * FROM src WHERE key >= '9' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -98 val_98 -92 val_92 -96 val_96 -95 val_95 -98 val_98 -90 val_90 -95 val_95 -90 val_90 -9 val_9 -97 val_97 -90 val_90 -97 val_97 -PREHOOK: query: DROP INDEX src_index ON src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -PREHOOK: query: ALTER INDEX src_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src_index__ -PREHOOK: query: SELECT * FROM src WHERE key = '0' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: SELECT * FROM src WHERE key < '1' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: SELECT * FROM src WHERE key <= '0' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -PREHOOK: query: SELECT * FROM src WHERE key > '8' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -86 val_86 -98 val_98 -82 val_82 -92 val_92 -83 val_83 -84 val_84 -96 val_96 -95 val_95 -98 val_98 -85 val_85 -87 val_87 -90 val_90 -95 val_95 -80 val_80 -90 val_90 -83 val_83 -9 val_9 -97 val_97 -84 val_84 -90 val_90 -97 val_97 -PREHOOK: query: SELECT * FROM src WHERE key >= '9' -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -98 val_98 -92 val_92 -96 val_96 -95 val_95 -98 val_98 -90 val_90 -95 val_95 -90 val_90 -9 val_9 -97 val_97 -90 val_90 -97 val_97 -PREHOOK: query: DROP INDEX src_index ON src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src diff --git ql/src/test/results/clientpositive/index_compression.q.out ql/src/test/results/clientpositive/index_compression.q.out deleted file mode 100644 index a2c0d2702b..0000000000 --- ql/src/test/results/clientpositive/index_compression.q.out +++ /dev/null @@ -1,158 +0,0 @@ -PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index__ -PREHOOK: query: ALTER INDEX 
src_index ON src REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@src -PREHOOK: Output: default@default__src_src_index__ -POSTHOOK: query: ALTER INDEX src_index ON src REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index__ -POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__src_src_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-3 is a root stage - Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6 - Stage-5 - Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 - Stage-1 depends on stages: Stage-2 - Stage-4 - Stage-6 - Stage-7 depends on stages: Stage-6 - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - alias: default__src_src_index__ - filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean) - Filter Operator - predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Select Operator - expressions: _bucketname (type: string), _offsets (type: array) - outputColumnNames: _col0, _col1 - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-8 - Conditional Operator - - Stage: Stage-5 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-2 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean) - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: true - Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-4 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-6 - Map Reduce - Map Operator Tree: - TableScan - File Output Operator - compressed: false - 
table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-7 - Move Operator - files: - hdfs directory: true -#### A masked pattern was here #### - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100 -PREHOOK: type: QUERY -PREHOOK: Input: default@default__src_src_index__ -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__src_src_index__ -POSTHOOK: Input: default@src -#### A masked pattern was here #### -82 val_82 -83 val_83 -83 val_83 -84 val_84 -84 val_84 -85 val_85 -86 val_86 -87 val_87 -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 -98 val_98 -PREHOOK: query: DROP INDEX src_index on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: DROP INDEX src_index on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src diff --git ql/src/test/results/clientpositive/index_creation.q.out ql/src/test/results/clientpositive/index_creation.q.out deleted file mode 100644 index 49b3f11e7b..0000000000 --- ql/src/test/results/clientpositive/index_creation.q.out +++ /dev/null @@ -1,321 +0,0 @@ -PREHOOK: query: drop index src_index_2 on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: drop index src_index_2 on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: drop index src_index_3 on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: drop index src_index_3 on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: drop index src_index_4 on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: drop index src_index_4 on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: drop index src_index_5 on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: drop index src_index_5 on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: drop index src_index_6 on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: drop index src_index_6 on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: drop index src_index_7 on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: drop index src_index_7 on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: drop index src_index_8 on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: drop index src_index_8 on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: drop index src_index_9 on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: drop index src_index_9 on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: drop table `_t` -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table `_t` -POSTHOOK: type: DROPTABLE -PREHOOK: query: create index src_index_2 on table src(key) as 'compact' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: create index src_index_2 on table src(key) as 'compact' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: 
Input: default@src -POSTHOOK: Output: default@default__src_src_index_2__ -PREHOOK: query: desc extended default__src_src_index_2__ -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@default__src_src_index_2__ -POSTHOOK: query: desc extended default__src_src_index_2__ -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@default__src_src_index_2__ -key string default -_bucketname string -_offsets array - -#### A masked pattern was here #### -PREHOOK: query: create index src_index_3 on table src(key) as 'compact' WITH DEFERRED REBUILD in table src_idx_src_index_3 -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: create index src_index_3 on table src(key) as 'compact' WITH DEFERRED REBUILD in table src_idx_src_index_3 -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@src_idx_src_index_3 -PREHOOK: query: desc extended src_idx_src_index_3 -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@src_idx_src_index_3 -POSTHOOK: query: desc extended src_idx_src_index_3 -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@src_idx_src_index_3 -key string default -_bucketname string -_offsets array - -#### A masked pattern was here #### -PREHOOK: query: create index src_index_4 on table src(key) as 'compact' WITH DEFERRED REBUILD ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: create index src_index_4 on table src(key) as 'compact' WITH DEFERRED REBUILD ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index_4__ -PREHOOK: query: desc extended default__src_src_index_4__ -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@default__src_src_index_4__ -POSTHOOK: query: desc extended default__src_src_index_4__ -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@default__src_src_index_4__ -key string default -_bucketname string -_offsets array - -#### A masked pattern was here #### -PREHOOK: query: create index src_index_5 on table src(key) as 'compact' WITH DEFERRED REBUILD ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' ESCAPED BY '\\' -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: create index src_index_5 on table src(key) as 'compact' WITH DEFERRED REBUILD ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' ESCAPED BY '\\' -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index_5__ -PREHOOK: query: desc extended default__src_src_index_5__ -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@default__src_src_index_5__ -POSTHOOK: query: desc extended default__src_src_index_5__ -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@default__src_src_index_5__ -key string default -_bucketname string -_offsets array - -#### A masked pattern was here #### -PREHOOK: query: create index src_index_6 on table src(key) as 'compact' WITH DEFERRED REBUILD STORED AS RCFILE -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: create index src_index_6 on table src(key) as 'compact' WITH DEFERRED REBUILD STORED AS RCFILE -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index_6__ -PREHOOK: query: desc extended default__src_src_index_6__ -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@default__src_src_index_6__ -POSTHOOK: query: desc extended default__src_src_index_6__ -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: 
default@default__src_src_index_6__ -key string default -_bucketname string -_offsets array - -#### A masked pattern was here #### -PREHOOK: query: create index src_index_7 on table src(key) as 'compact' WITH DEFERRED REBUILD in table src_idx_src_index_7 STORED AS RCFILE -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: create index src_index_7 on table src(key) as 'compact' WITH DEFERRED REBUILD in table src_idx_src_index_7 STORED AS RCFILE -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@src_idx_src_index_7 -PREHOOK: query: desc extended src_idx_src_index_7 -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@src_idx_src_index_7 -POSTHOOK: query: desc extended src_idx_src_index_7 -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@src_idx_src_index_7 -key string default -_bucketname string -_offsets array - -#### A masked pattern was here #### -PREHOOK: query: create index src_index_8 on table src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2") -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: create index src_index_8 on table src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2") -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index_8__ -PREHOOK: query: desc extended default__src_src_index_8__ -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@default__src_src_index_8__ -POSTHOOK: query: desc extended default__src_src_index_8__ -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@default__src_src_index_8__ -key string default -_bucketname string -_offsets array - -#### A masked pattern was here #### -PREHOOK: query: create index src_index_9 on table src(key) as 'compact' WITH DEFERRED REBUILD TBLPROPERTIES ("prop1"="val1", "prop2"="val2") -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@src -POSTHOOK: query: create index src_index_9 on table src(key) as 'compact' WITH DEFERRED REBUILD TBLPROPERTIES ("prop1"="val1", "prop2"="val2") -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@src -POSTHOOK: Output: default@default__src_src_index_9__ -PREHOOK: query: desc extended default__src_src_index_9__ -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@default__src_src_index_9__ -POSTHOOK: query: desc extended default__src_src_index_9__ -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@default__src_src_index_9__ -key string default -_bucketname string -_offsets array - -#### A masked pattern was here #### -PREHOOK: query: create table `_t`(`_i` int, `_j` int) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@_t -POSTHOOK: query: create table `_t`(`_i` int, `_j` int) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@_t -PREHOOK: query: create index x on table `_t`(`_j`) as 'compact' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@_t -POSTHOOK: query: create index x on table `_t`(`_j`) as 'compact' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@_t -POSTHOOK: Output: default@default___t_x__ -PREHOOK: query: alter index x on `_t` rebuild -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@_t -PREHOOK: Output: default@default___t_x__ -POSTHOOK: query: alter index x on `_t` rebuild -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@_t -POSTHOOK: Output: default@default___t_x__ -POSTHOOK: Lineage: default___t_x__._bucketname 
SIMPLE [(_t)_t.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default___t_x__._j SIMPLE [(_t)_t.FieldSchema(name:_j, type:int, comment:null), ] -POSTHOOK: Lineage: default___t_x__._offsets EXPRESSION [(_t)_t.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -PREHOOK: query: create index x2 on table `_t`(`_i`,`_j`) as 'compact' WITH DEFERRED -REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@_t -POSTHOOK: query: create index x2 on table `_t`(`_i`,`_j`) as 'compact' WITH DEFERRED -REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@_t -POSTHOOK: Output: default@default___t_x2__ -PREHOOK: query: alter index x2 on `_t` rebuild -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@_t -PREHOOK: Output: default@default___t_x2__ -POSTHOOK: query: alter index x2 on `_t` rebuild -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@_t -POSTHOOK: Output: default@default___t_x2__ -POSTHOOK: Lineage: default___t_x2__._bucketname SIMPLE [(_t)_t.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default___t_x2__._i SIMPLE [(_t)_t.FieldSchema(name:_i, type:int, comment:null), ] -POSTHOOK: Lineage: default___t_x2__._j SIMPLE [(_t)_t.FieldSchema(name:_j, type:int, comment:null), ] -POSTHOOK: Lineage: default___t_x2__._offsets EXPRESSION [(_t)_t.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -PREHOOK: query: drop index src_index_2 on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: drop index src_index_2 on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: drop index src_index_3 on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: drop index src_index_3 on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: drop index src_index_4 on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: drop index src_index_4 on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: drop index src_index_5 on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: drop index src_index_5 on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: drop index src_index_6 on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: drop index src_index_6 on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: drop index src_index_7 on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: drop index src_index_7 on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: drop index src_index_8 on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: drop index src_index_8 on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: drop index src_index_9 on src -PREHOOK: type: DROPINDEX -PREHOOK: Input: default@src -POSTHOOK: query: drop index src_index_9 on src -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: default@src -PREHOOK: query: drop table `_t` -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@_t -PREHOOK: Output: default@_t -POSTHOOK: query: drop table `_t` -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@_t -POSTHOOK: Output: default@_t -PREHOOK: query: show tables -PREHOOK: type: SHOWTABLES -PREHOOK: Input: database:default -POSTHOOK: query: show tables -POSTHOOK: type: SHOWTABLES -POSTHOOK: Input: database:default -alltypesorc -alltypesparquet 
-cbo_t1 -cbo_t2 -cbo_t3 -lineitem -part -src -src1 -src_cbo -src_json -src_sequencefile -src_thrift -srcbucket -srcbucket2 -srcpart diff --git ql/src/test/results/clientpositive/index_in_db.q.out ql/src/test/results/clientpositive/index_in_db.q.out deleted file mode 100644 index 6d7b0c30be..0000000000 --- ql/src/test/results/clientpositive/index_in_db.q.out +++ /dev/null @@ -1,57 +0,0 @@ -PREHOOK: query: drop database if exists index_test_db cascade -PREHOOK: type: DROPDATABASE -POSTHOOK: query: drop database if exists index_test_db cascade -POSTHOOK: type: DROPDATABASE -PREHOOK: query: create database index_test_db -PREHOOK: type: CREATEDATABASE -PREHOOK: Output: database:index_test_db -POSTHOOK: query: create database index_test_db -POSTHOOK: type: CREATEDATABASE -POSTHOOK: Output: database:index_test_db -PREHOOK: query: use index_test_db -PREHOOK: type: SWITCHDATABASE -PREHOOK: Input: database:index_test_db -POSTHOOK: query: use index_test_db -POSTHOOK: type: SWITCHDATABASE -POSTHOOK: Input: database:index_test_db -PREHOOK: query: create table testtb (id int, name string) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:index_test_db -PREHOOK: Output: index_test_db@testtb -POSTHOOK: query: create table testtb (id int, name string) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:index_test_db -POSTHOOK: Output: index_test_db@testtb -PREHOOK: query: create index id_index on table testtb (id) as 'COMPACT' WITH DEFERRED REBUILD in table testdb_id_idx_tb -PREHOOK: type: CREATEINDEX -PREHOOK: Input: index_test_db@testtb -POSTHOOK: query: create index id_index on table testtb (id) as 'COMPACT' WITH DEFERRED REBUILD in table testdb_id_idx_tb -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: index_test_db@testtb -POSTHOOK: Output: index_test_db@testdb_id_idx_tb -PREHOOK: query: use default -PREHOOK: type: SWITCHDATABASE -PREHOOK: Input: database:default -POSTHOOK: query: use default -POSTHOOK: type: SWITCHDATABASE -POSTHOOK: Input: database:default -PREHOOK: query: select * from index_test_db.testtb where id>2 -PREHOOK: type: QUERY -PREHOOK: Input: index_test_db@testtb -#### A masked pattern was here #### -POSTHOOK: query: select * from index_test_db.testtb where id>2 -POSTHOOK: type: QUERY -POSTHOOK: Input: index_test_db@testtb -#### A masked pattern was here #### -PREHOOK: query: use index_test_db -PREHOOK: type: SWITCHDATABASE -PREHOOK: Input: database:index_test_db -POSTHOOK: query: use index_test_db -POSTHOOK: type: SWITCHDATABASE -POSTHOOK: Input: database:index_test_db -PREHOOK: query: drop index id_index on testtb -PREHOOK: type: DROPINDEX -PREHOOK: Input: index_test_db@testtb -POSTHOOK: query: drop index id_index on testtb -POSTHOOK: type: DROPINDEX -POSTHOOK: Input: index_test_db@testtb diff --git ql/src/test/results/clientpositive/index_serde.q.out ql/src/test/results/clientpositive/index_serde.q.out deleted file mode 100644 index b5c81e9830..0000000000 --- ql/src/test/results/clientpositive/index_serde.q.out +++ /dev/null @@ -1,242 +0,0 @@ -PREHOOK: query: CREATE TABLE doctors -ROW FORMAT -SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' -STORED AS -INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' -OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' -TBLPROPERTIES ('avro.schema.literal'='{ - "namespace": "testing.hive.avro.serde", - "name": "doctors", - "type": "record", - "fields": [ - { - "name":"number", - "type":"int", - "doc":"Order of playing the role" - }, - { - "name":"first_name", - "type":"string", - 
"doc":"first name of actor playing role" - }, - { - "name":"last_name", - "type":"string", - "doc":"last name of actor playing role" - } - ] -}') -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@doctors -POSTHOOK: query: CREATE TABLE doctors -ROW FORMAT -SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' -STORED AS -INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' -OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' -TBLPROPERTIES ('avro.schema.literal'='{ - "namespace": "testing.hive.avro.serde", - "name": "doctors", - "type": "record", - "fields": [ - { - "name":"number", - "type":"int", - "doc":"Order of playing the role" - }, - { - "name":"first_name", - "type":"string", - "doc":"first name of actor playing role" - }, - { - "name":"last_name", - "type":"string", - "doc":"last name of actor playing role" - } - ] -}') -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@doctors -PREHOOK: query: DESCRIBE doctors -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@doctors -POSTHOOK: query: DESCRIBE doctors -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@doctors -number int Order of playing the role -first_name string first name of actor playing role -last_name string last name of actor playing role -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@doctors -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@doctors -PREHOOK: query: CREATE INDEX doctors_index ON TABLE doctors(number) AS 'COMPACT' WITH DEFERRED REBUILD -PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@doctors -POSTHOOK: query: CREATE INDEX doctors_index ON TABLE doctors(number) AS 'COMPACT' WITH DEFERRED REBUILD -POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@doctors -POSTHOOK: Output: default@default__doctors_doctors_index__ -PREHOOK: query: DESCRIBE EXTENDED default__doctors_doctors_index__ -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@default__doctors_doctors_index__ -POSTHOOK: query: DESCRIBE EXTENDED default__doctors_doctors_index__ -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@default__doctors_doctors_index__ -number int Order of playing the role -_bucketname string -_offsets array - -#### A masked pattern was here #### -PREHOOK: query: ALTER INDEX doctors_index ON doctors REBUILD -PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@doctors -PREHOOK: Output: default@default__doctors_doctors_index__ -POSTHOOK: query: ALTER INDEX doctors_index ON doctors REBUILD -POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@doctors -POSTHOOK: Output: default@default__doctors_doctors_index__ -POSTHOOK: Lineage: default__doctors_doctors_index__._bucketname SIMPLE [(doctors)doctors.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__doctors_doctors_index__._offsets EXPRESSION [(doctors)doctors.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__doctors_doctors_index__.number SIMPLE [(doctors)doctors.FieldSchema(name:number, type:int, comment:Order of playing the role), ] -PREHOOK: query: EXPLAIN SELECT * FROM doctors WHERE number > 6 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT * FROM doctors WHERE number > 6 -POSTHOOK: type: 
QUERY
-STAGE DEPENDENCIES:
-  Stage-3 is a root stage
-  Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6
-  Stage-5
-  Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
-  Stage-1 depends on stages: Stage-2
-  Stage-4
-  Stage-6
-  Stage-7 depends on stages: Stage-6
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: default__doctors_doctors_index__
-            filterExpr: (number > 6) (type: boolean)
-            Filter Operator
-              predicate: (number > 6) (type: boolean)
-              Select Operator
-                expressions: _bucketname (type: string), _offsets (type: array<bigint>)
-                outputColumnNames: _col0, _col1
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-8
-    Conditional Operator
-
-  Stage: Stage-5
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-2
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: doctors
-            filterExpr: (number > 6) (type: boolean)
-            Statistics: Num rows: 1 Data size: 5210 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (number > 6) (type: boolean)
-              Statistics: Num rows: 1 Data size: 5210 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: number (type: int), first_name (type: string), last_name (type: string)
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 1 Data size: 5210 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 5210 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-6
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-7
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT * FROM doctors WHERE number > 6
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__doctors_doctors_index__
-PREHOOK: Input: default@doctors
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM doctors WHERE number > 6
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__doctors_doctors_index__
-POSTHOOK: Input: default@doctors
-#### A masked pattern was here ####
-10 David Tennant
-11 Matt Smith
-7 Sylvester McCoy
-8 Paul McGann
-9 Christopher Eccleston
-PREHOOK: query: DROP INDEX doctors_index ON doctors
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@doctors
-POSTHOOK: query: DROP INDEX doctors_index ON doctors
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@doctors
-PREHOOK: query: DROP TABLE doctors
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@doctors
-PREHOOK: Output: default@doctors
-POSTHOOK: query: DROP TABLE doctors
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@doctors
-POSTHOOK: Output: default@doctors
diff --git ql/src/test/results/clientpositive/index_skewtable.q.out ql/src/test/results/clientpositive/index_skewtable.q.out
deleted file mode 100644
index c513a2a6ab..0000000000
--- ql/src/test/results/clientpositive/index_skewtable.q.out
+++ /dev/null
@@ -1,204 +0,0 @@
-PREHOOK: query: CREATE TABLE kv(key STRING, value STRING) SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@kv
-POSTHOOK: query: CREATE TABLE kv(key STRING, value STRING) SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@kv
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE kv
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@kv
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE kv
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@kv
-PREHOOK: query: CREATE INDEX kv_index ON TABLE kv(value) AS 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@kv
-POSTHOOK: query: CREATE INDEX kv_index ON TABLE kv(value) AS 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@kv
-POSTHOOK: Output: default@default__kv_kv_index__
-PREHOOK: query: DESCRIBE FORMATTED default__kv_kv_index__
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@default__kv_kv_index__
-POSTHOOK: query: DESCRIBE FORMATTED default__kv_kv_index__
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@default__kv_kv_index__
-# col_name data_type comment
-value string
-_bucketname string
-_offsets array<bigint>
-
-# Detailed Table Information
-Database: default
-#### A masked pattern was here ####
-Retention: 0
-#### A masked pattern was here ####
-Table Type: INDEX_TABLE
-Table Parameters:
-#### A masked pattern was here ####
-
-# Storage Information
-SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-InputFormat: org.apache.hadoop.mapred.TextInputFormat
-OutputFormat: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-Compressed: No
-Num Buckets: -1
-Bucket Columns: []
-Sort Columns: [Order(col:value, order:1)]
-PREHOOK: query: ALTER INDEX kv_index ON kv REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@kv
-PREHOOK: Output: default@default__kv_kv_index__
-POSTHOOK: query: ALTER INDEX kv_index ON kv REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@kv
-POSTHOOK: Output: default@default__kv_kv_index__
-POSTHOOK: Lineage: default__kv_kv_index__._bucketname SIMPLE [(kv)kv.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__kv_kv_index__._offsets EXPRESSION [(kv)kv.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__kv_kv_index__.value SIMPLE [(kv)kv.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN SELECT * FROM kv WHERE value > '15' ORDER BY value
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT * FROM kv WHERE value > '15' ORDER BY value
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-3 is a root stage
-  Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6
-  Stage-5
-  Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
-  Stage-1 depends on stages: Stage-2
-  Stage-4
-  Stage-6
-  Stage-7 depends on stages: Stage-6
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: default__kv_kv_index__
-            filterExpr: (value > '15') (type: boolean)
-            Filter Operator
-              predicate: (value > '15') (type: boolean)
-              Select Operator
-                expressions: _bucketname (type: string), _offsets (type: array<bigint>)
-                outputColumnNames: _col0, _col1
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-8
-    Conditional Operator
-
-  Stage: Stage-5
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-2
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: kv
-            filterExpr: (value > '15') (type: boolean)
-            Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (value > '15') (type: boolean)
-              Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col1 (type: string)
-                  sort order: +
-                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: string)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-6
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-7
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT * FROM kv WHERE value > '15' ORDER BY value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__kv_kv_index__
-PREHOOK: Input: default@kv
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM kv WHERE value > '15' ORDER BY value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__kv_kv_index__
-POSTHOOK: Input: default@kv
-#### A masked pattern was here ####
-8 18
-8 18
-2 22
-PREHOOK: query: DROP INDEX kv_index ON kv
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@kv
-POSTHOOK: query: DROP INDEX kv_index ON kv
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@kv
-PREHOOK: query: DROP TABLE kv
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@kv
-PREHOOK: Output: default@kv
-POSTHOOK: query: DROP TABLE kv
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@kv
-POSTHOOK: Output: default@kv
diff --git ql/src/test/results/clientpositive/index_stale.q.out ql/src/test/results/clientpositive/index_stale.q.out
deleted file mode 100644
index 7883fccf56..0000000000
--- ql/src/test/results/clientpositive/index_stale.q.out
+++ /dev/null
@@ -1,106 +0,0 @@
-PREHOOK: query: CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@temp
-POSTHOOK: query: CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@temp
-PREHOOK: query: INSERT OVERWRITE TABLE temp SELECT * FROM src WHERE key < 50
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@temp
-POSTHOOK: query: INSERT OVERWRITE TABLE temp SELECT * FROM src WHERE key < 50
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@temp
-POSTHOOK: Lineage: temp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: temp.val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@temp
-POSTHOOK: query: CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@temp
-POSTHOOK: Output: default@default__temp_temp_index__
-PREHOOK: query: ALTER INDEX temp_index ON temp REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@temp
-PREHOOK: Output: default@default__temp_temp_index__
-POSTHOOK: query: ALTER INDEX temp_index ON temp REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@temp
-POSTHOOK: Output: default@default__temp_temp_index__
-POSTHOOK: Lineage: default__temp_temp_index__._bucketname SIMPLE [(temp)temp.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__temp_temp_index__._offsets EXPRESSION [(temp)temp.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__temp_temp_index__.key SIMPLE [(temp)temp.FieldSchema(name:key, type:string, comment:null), ]
-PREHOOK: query: INSERT OVERWRITE TABLE temp SELECT * FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@temp
-POSTHOOK: query: INSERT OVERWRITE TABLE temp SELECT * FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@temp
-POSTHOOK: Lineage: temp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: temp.val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN SELECT * FROM temp WHERE key = 86
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT * FROM temp WHERE key = 86
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: temp
-            filterExpr: (UDFToDouble(key) = 86.0) (type: boolean)
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (UDFToDouble(key) = 86.0) (type: boolean)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), val (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT * FROM temp WHERE key = 86
-PREHOOK: type: QUERY
-PREHOOK: Input: default@temp
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM temp WHERE key = 86
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@temp
-#### A masked pattern was here ####
-86 val_86
-PREHOOK: query: DROP index temp_index on temp
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@temp
-POSTHOOK: query: DROP index temp_index on temp
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@temp
-PREHOOK: query: DROP table temp
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@temp
-PREHOOK: Output: default@temp
-POSTHOOK: query: DROP table temp
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@temp
-POSTHOOK: Output: default@temp
diff --git ql/src/test/results/clientpositive/index_stale_partitioned.q.out ql/src/test/results/clientpositive/index_stale_partitioned.q.out
deleted file mode 100644
index 2138c3311e..0000000000
--- ql/src/test/results/clientpositive/index_stale_partitioned.q.out
+++ /dev/null
@@ -1,115 +0,0 @@
-PREHOOK: query: CREATE TABLE temp(key STRING, val STRING) PARTITIONED BY (foo string) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@temp
-POSTHOOK: query: CREATE TABLE temp(key STRING, val STRING) PARTITIONED BY (foo string) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@temp
-PREHOOK: query: ALTER TABLE temp ADD PARTITION (foo = 'bar')
-PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@temp
-POSTHOOK: query: ALTER TABLE temp ADD PARTITION (foo = 'bar')
-POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@temp
-POSTHOOK: Output: default@temp@foo=bar
-PREHOOK: query: INSERT OVERWRITE TABLE temp PARTITION (foo = 'bar') SELECT * FROM src WHERE key < 50
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@temp@foo=bar
-POSTHOOK: query: INSERT OVERWRITE TABLE temp PARTITION (foo = 'bar') SELECT * FROM src WHERE key < 50
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@temp@foo=bar
-POSTHOOK: Lineage: temp PARTITION(foo=bar).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: temp PARTITION(foo=bar).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@temp
-POSTHOOK: query: CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@temp
-POSTHOOK: Output: default@default__temp_temp_index__
-PREHOOK: query: ALTER INDEX temp_index ON temp PARTITION (foo = 'bar') REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@temp
-PREHOOK: Input: default@temp@foo=bar
-PREHOOK: Output: default@default__temp_temp_index__@foo=bar
-POSTHOOK: query: ALTER INDEX temp_index ON temp PARTITION (foo = 'bar') REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@temp
-POSTHOOK: Input: default@temp@foo=bar
-POSTHOOK: Output: default@default__temp_temp_index__@foo=bar
-POSTHOOK: Lineage: default__temp_temp_index__ PARTITION(foo=bar)._bucketname SIMPLE [(temp)temp.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__temp_temp_index__ PARTITION(foo=bar)._offsets EXPRESSION [(temp)temp.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__temp_temp_index__ PARTITION(foo=bar).key SIMPLE [(temp)temp.FieldSchema(name:key, type:string, comment:null), ]
-PREHOOK: query: INSERT OVERWRITE TABLE temp PARTITION (foo = 'bar') SELECT * FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@temp@foo=bar
-POSTHOOK: query: INSERT OVERWRITE TABLE temp PARTITION (foo = 'bar') SELECT * FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@temp@foo=bar
-POSTHOOK: Lineage: temp PARTITION(foo=bar).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: temp PARTITION(foo=bar).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM default__temp_temp_index__ WHERE key = 86 AND foo='bar'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__temp_temp_index__
-PREHOOK: Input: default@default__temp_temp_index__@foo=bar
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM default__temp_temp_index__ WHERE key = 86 AND foo='bar'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__temp_temp_index__
-POSTHOOK: Input: default@default__temp_temp_index__@foo=bar
-#### A masked pattern was here ####
-PREHOOK: query: EXPLAIN SELECT * FROM temp WHERE key = 86 AND foo = 'bar'
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT * FROM temp WHERE key = 86 AND foo = 'bar'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        TableScan
-          alias: temp
-          filterExpr: ((UDFToDouble(key) = 86.0) and (foo = 'bar')) (type: boolean)
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: (UDFToDouble(key) = 86.0) (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), val (type: string), 'bar' (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              ListSink
-
-PREHOOK: query: SELECT * FROM temp WHERE key = 86 AND foo = 'bar'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@temp
-PREHOOK: Input: default@temp@foo=bar
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM temp WHERE key = 86 AND foo = 'bar'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@temp
-POSTHOOK: Input: default@temp@foo=bar
-#### A masked pattern was here ####
-86 val_86 bar
-PREHOOK: query: DROP index temp_index on temp
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@temp
-POSTHOOK: query: DROP index temp_index on temp
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@temp
-PREHOOK: query: DROP table temp
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@temp
-PREHOOK: Output: default@temp
-POSTHOOK: query: DROP table temp
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@temp
-POSTHOOK: Output: default@temp