diff --git a/common/src/java/org/apache/hadoop/hive/conf/Constants.java b/common/src/java/org/apache/hadoop/hive/conf/Constants.java index 7a6ee95015..15397392e3 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/Constants.java +++ b/common/src/java/org/apache/hadoop/hive/conf/Constants.java @@ -18,6 +18,9 @@ package org.apache.hadoop.hive.conf; public class Constants { + /* Constants for Hive stats */ + public static final String HIVE_ENGINE = "hive"; + /* Constants for LLAP */ public static final String LLAP_LOGGER_NAME_QUERY_ROUTING = "query-routing"; public static final String LLAP_LOGGER_NAME_CONSOLE = "console"; diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java index 0212e076cd..6d8c29c3b8 100644 --- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java +++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java @@ -721,33 +721,39 @@ public long cleanupEvents() { } @Override - public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, + public List getTableColumnStatistics(String catName, String dbName, String tableName, List colNames) throws MetaException, NoSuchObjectException { return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames); } @Override public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, - List colNames, + List colNames, String engine) throws MetaException, NoSuchObjectException { + return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames, engine); + } + + @Override + public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, + List colNames, String engine, String writeIdList) throws MetaException, NoSuchObjectException { - return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames, writeIdList); + return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames, engine, writeIdList); } @Override public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, - String colName) + String colName, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - return objectStore.deleteTableColumnStatistics(catName, dbName, tableName, colName); + return objectStore.deleteTableColumnStatistics(catName, dbName, tableName, colName, engine); } @Override public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, - String partName, List partVals, String colName) + String partName, List partVals, String colName, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { return objectStore.deletePartitionColumnStatistics(catName, dbName, tableName, partName, - partVals, colName); + partVals, colName, engine); } @Override @@ -818,7 +824,7 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro } @Override - public List getPartitionColumnStatistics(String catName, String dbName, + public List> getPartitionColumnStatistics(String catName, String dbName, String tblName, List colNames, List partNames) throws MetaException, NoSuchObjectException { @@ -828,11 +834,19 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro 
@Override public List getPartitionColumnStatistics(String catName, String dbName, String tblName, List colNames, - List partNames, + List partNames, String engine) + throws MetaException, NoSuchObjectException { + return objectStore.getPartitionColumnStatistics(catName, dbName, tblName , colNames, partNames, engine); + } + + @Override + public List getPartitionColumnStatistics(String catName, String dbName, + String tblName, List colNames, + List partNames, String engine, String writeIdList) throws MetaException, NoSuchObjectException { return objectStore.getPartitionColumnStatistics( - catName, dbName, tblName , colNames, partNames, writeIdList); + catName, dbName, tblName , colNames, partNames, engine, writeIdList); } @Override @@ -907,7 +921,8 @@ public Function getFunction(String catName, String dbName, String funcName) @Override public AggrStats get_aggr_stats_for(String catName, String dbName, - String tblName, List partNames, List colNames) + String tblName, List partNames, List colNames, + String engine) throws MetaException { return null; } @@ -915,7 +930,7 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, @Override public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, List colNames, - String writeIdList) + String engine, String writeIdList) throws MetaException { return null; } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetastoreTransformer.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetastoreTransformer.java index fb676f2700..9732cd9265 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetastoreTransformer.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetastoreTransformer.java @@ -773,7 +773,7 @@ public void testGetPartitionsByNames() throws Exception { for (int i = 1; i <= pCount; i++) { partValues.add("partcol=" + i); } - List parts = client.getPartitionsByNames(dbName, tblName, partValues, false); + List parts = client.getPartitionsByNames(dbName, tblName, partValues, false, null); assertEquals("Return list size does not match expected size", pCount, parts.size()); tblName = "test_gp_ext_bucketed_wc"; @@ -787,7 +787,7 @@ public void testGetPartitionsByNames() throws Exception { tProps.put("PROPERTIES", properties.toString()); table = createTableWithCapabilities(tProps); - parts = client.getPartitionsByNames(dbName, tblName, partValues, false); + parts = client.getPartitionsByNames(dbName, tblName, partValues, false, null); LOG.debug("Return list size=" + parts.size()); for (Partition part : parts) { @@ -798,7 +798,7 @@ public void testGetPartitionsByNames() throws Exception { capabilities.clear(); capabilities.add("HIVEBUCKET2"); setHMSClient("TestGetPartitionByNames#2", (String[])(capabilities.toArray(new String[0]))); - parts = client.getPartitionsByNames(dbName, tblName, partValues, false); + parts = client.getPartitionsByNames(dbName, tblName, partValues, false, null); for (Partition part : parts) { assertEquals("Partition bucket count does not match", -1, part.getSd().getNumBuckets()); @@ -819,7 +819,7 @@ public void testGetPartitionsByNames() throws Exception { capabilities.clear(); capabilities.add("CONNECTORREAD"); setHMSClient("TestGetPartitionByNames#3", (String[])(capabilities.toArray(new String[0]))); - parts = client.getPartitionsByNames(dbName, tblName, partValues, false); + parts = client.getPartitionsByNames(dbName, tblName, partValues, false, null); 
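The test updates above illustrate the widened client read path: `getPartitionsByNames` gains a trailing `engine` argument, which may be `null` when column statistics are not requested (`getColStats` is false), while the stats getters require a concrete engine such as `Constants.HIVE_ENGINE`. A minimal sketch of both calls, assuming a reachable metastore; the table, partition, and column names are illustrative, borrowed from the surrounding test:

```java
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.conf.Constants;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.Partition;

public class EngineAwareReadSketch {
  public static void main(String[] args) throws Exception {
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    try {
      // No column stats requested, so no engine is needed: pass null.
      List<Partition> parts = client.getPartitionsByNames(
          "default", "test_gp_ext_bucketed_wc", Arrays.asList("partcol=1"), false, null);

      // Stats reads now select by engine; "hive" picks the stats Hive itself computed.
      List<ColumnStatisticsObj> stats = client.getTableColumnStatistics(
          "default", "test_gp_ext_bucketed_wc", Arrays.asList("a"), Constants.HIVE_ENGINE);

      System.out.println(parts.size() + " partitions, " + stats.size() + " stat objects");
    } finally {
      client.close();
    }
  }
}
```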
assertEquals("Partition count does not match", pCount, parts.size()); LOG.info("Test execution complete:testGetPartitionsByNames"); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java index 285f30b008..562b2c9763 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java @@ -6,6 +6,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.*; import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; @@ -405,8 +406,9 @@ private void updateTableColStats(String dbName, String tblName, String[] colName ColumnStatistics colStats = new ColumnStatistics(); colStats.setStatsDesc(statsDesc); colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, highValue, avgColLen)); + colStats.setEngine(Constants.HIVE_ENGINE); - SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats)); + SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE); setTblColStat.setWriteId(writeId); setTblColStat.setValidWriteIdList(validWriteIds); @@ -446,8 +448,9 @@ private void updatePartColStats(String dbName, String tblName, boolean isTxnTabl ColumnStatistics colStats = new ColumnStatistics(); colStats.setStatsDesc(statsDesc); colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, highValue, avgColLen)); + colStats.setEngine(Constants.HIVE_ENGINE); - SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats)); + SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE); setTblColStat.setWriteId(writeId); setTblColStat.setValidWriteIdList(validWriteIds); @@ -463,7 +466,7 @@ private void updatePartColStats(String dbName, String tblName, boolean isTxnTabl Deadline.startTimer("getPartitionColumnStatistics"); List statRowStore = rawStore.getPartitionColumnStatistics(DEFAULT_CATALOG_NAME, dbName, tblName, - Collections.singletonList(partName), Collections.singletonList(colName[1]), validWriteIds); + Collections.singletonList(partName), Collections.singletonList(colName[1]), Constants.HIVE_ENGINE, validWriteIds); Deadline.stopTimer(); verifyStatString(statRowStore.get(0).getStatsObj().get(0), colName[1], avgColLen); if (isTxnTable) { @@ -682,7 +685,7 @@ private void validatePartPara(String dbName, String tblName, String partName) th } private void deleteColStats(String dbName, String tblName, String[] colName) throws Throwable { - boolean status = hmsHandler.delete_table_column_statistics(dbName, tblName, null); + boolean status = hmsHandler.delete_table_column_statistics(dbName, tblName, null, Constants.HIVE_ENGINE); Assert.assertEquals(status, true); Assert.assertEquals(sharedCache.getTableColStatsFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, Lists.newArrayList(colName[0]), null, true).getStatsObj().isEmpty(), true); @@ -693,7 +696,7 @@ private void deleteColStats(String dbName, String tblName, 
String[] colName) thr private void deletePartColStats(String dbName, String tblName, String[] colName, String partName) throws Throwable { - boolean status = hmsHandler.delete_partition_column_statistics(dbName, tblName, partName, colName[1]); + boolean status = hmsHandler.delete_partition_column_statistics(dbName, tblName, partName, colName[1], Constants.HIVE_ENGINE); Assert.assertEquals(status, true); SharedCache.ColumStatsWithWriteId colStats = sharedCache.getPartitionColStatsFromCache(DEFAULT_CATALOG_NAME, dbName, @@ -781,8 +784,9 @@ public void testTableColumnStatisticsTxnTableMultiAbort() throws Throwable { ColumnStatistics colStats = new ColumnStatistics(); colStats.setStatsDesc(statsDesc); colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, highValue, avgColLen)); + colStats.setEngine(Constants.HIVE_ENGINE); - SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats)); + SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE); setTblColStat.setWriteId(writeId); setTblColStat.setValidWriteIdList(validWriteIds); @@ -798,7 +802,7 @@ public void testTableColumnStatisticsTxnTableMultiAbort() throws Throwable { Deadline.startTimer("getPartitionColumnStatistics"); List statRawStore = rawStore.getPartitionColumnStatistics(DEFAULT_CATALOG_NAME, dbName, tblName, - Collections.singletonList(partName), Collections.singletonList(colName[1]), validWriteIds); + Collections.singletonList(partName), Collections.singletonList(colName[1]), Constants.HIVE_ENGINE, validWriteIds); Deadline.stopTimer(); verifyStat(statRawStore.get(0).getStatsObj(), colName, highValue, avgColLen); @@ -847,8 +851,9 @@ public void testTableColumnStatisticsTxnTableOpenTxn() throws Throwable { ColumnStatistics colStats = new ColumnStatistics(); colStats.setStatsDesc(statsDesc); colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, highValue, avgColLen)); + colStats.setEngine(Constants.HIVE_ENGINE); - SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats)); + SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE); setTblColStat.setWriteId(writeId); setTblColStat.setValidWriteIdList(validWriteIds); @@ -862,7 +867,7 @@ public void testTableColumnStatisticsTxnTableOpenTxn() throws Throwable { Deadline.startTimer("getPartitionColumnStatistics"); List statRawStore = rawStore.getPartitionColumnStatistics(DEFAULT_CATALOG_NAME, dbName, tblName, - Collections.singletonList(partName), Collections.singletonList(colName[1]), validWriteIds); + Collections.singletonList(partName), Collections.singletonList(colName[1]), Constants.HIVE_ENGINE, validWriteIds); Deadline.stopTimer(); verifyStat(statRawStore.get(0).getStatsObj(), colName, highValue, avgColLen); @@ -889,7 +894,7 @@ private void verifyAggrStat(String dbName, String tblName, String[] colName, Lis Deadline.startTimer("getPartitionSpecsByFilterAndProjection"); AggrStats aggrStats = rawStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, partitions, - Collections.singletonList(colName[0]), validWriteIds); + Collections.singletonList(colName[0]), Constants.HIVE_ENGINE, validWriteIds); Deadline.stopTimer(); Assert.assertEquals(aggrStats.getPartsFound(), 2); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getDoubleStats().getHighValue(), highValue, 0.01); @@ -897,7 +902,7 @@ private void 
verifyAggrStat(String dbName, String tblName, String[] colName, Lis // This will update the cache for non txn table. PartitionsStatsRequest request = new PartitionsStatsRequest(dbName, tblName, - Collections.singletonList(colName[0]), partitions); + Collections.singletonList(colName[0]), partitions, Constants.HIVE_ENGINE); request.setCatName(DEFAULT_CATALOG_NAME); request.setValidWriteIdList(validWriteIds); AggrStats aggrStatsCached = hmsHandler.get_aggr_stats_for(request); @@ -962,21 +967,22 @@ public void testAggrStatTxnTable() throws Throwable { ColumnStatistics colStats = new ColumnStatistics(); colStats.setStatsDesc(statsDesc); colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, 5, 20)); + colStats.setEngine(Constants.HIVE_ENGINE); - SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats)); + SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE); setTblColStat.setWriteId(writeId); setTblColStat.setValidWriteIdList(validWriteIds); hmsHandler.update_partition_column_statistics_req(setTblColStat); Deadline.startTimer("getPartitionSpecsByFilterAndProjection"); AggrStats aggrStats = rawStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, partitions, - Collections.singletonList(colName[0]), validWriteIds); + Collections.singletonList(colName[0]), Constants.HIVE_ENGINE, validWriteIds); Deadline.stopTimer(); Assert.assertEquals(aggrStats, null); // keep the txn open and verify that the stats got is not compliant. PartitionsStatsRequest request = new PartitionsStatsRequest(dbName, tblName, - Collections.singletonList(colName[0]), partitions); + Collections.singletonList(colName[0]), partitions, Constants.HIVE_ENGINE); request.setCatName(DEFAULT_CATALOG_NAME); request.setValidWriteIdList(validWriteIds); AggrStats aggrStatsCached = hmsHandler.get_aggr_stats_for(request); @@ -1013,8 +1019,9 @@ public void testAggrStatAbortTxn() throws Throwable { ColumnStatistics colStats = new ColumnStatistics(); colStats.setStatsDesc(statsDesc); colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, 5, 20)); + colStats.setEngine(Constants.HIVE_ENGINE); - SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats)); + SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE); setTblColStat.setWriteId(writeId); setTblColStat.setValidWriteIdList(validWriteIds); hmsHandler.update_partition_column_statistics_req(setTblColStat); @@ -1024,13 +1031,13 @@ public void testAggrStatAbortTxn() throws Throwable { Deadline.startTimer("getPartitionSpecsByFilterAndProjection"); AggrStats aggrStats = rawStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, partitions, - Collections.singletonList(colName[0]), validWriteIds); + Collections.singletonList(colName[0]), Constants.HIVE_ENGINE, validWriteIds); Deadline.stopTimer(); Assert.assertEquals(aggrStats, null); // keep the txn open and verify that the stats got is not compliant. 
PartitionsStatsRequest request = new PartitionsStatsRequest(dbName, tblName, - Collections.singletonList(colName[0]), partitions); + Collections.singletonList(colName[0]), partitions, Constants.HIVE_ENGINE); request.setCatName(DEFAULT_CATALOG_NAME); request.setValidWriteIdList(validWriteIds); AggrStats aggrStatsCached = hmsHandler.get_aggr_stats_for(request); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java index 6326bc34f2..ca8ac48811 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hive.cli.CliSessionState; import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; @@ -474,7 +475,7 @@ public Table getTable(String dbName, String tableName) throws Exception { * @return - list of ColumnStatisticsObj objects in the order of the specified columns */ public List getTableColumnStatistics(String dbName, String tableName) throws Exception { - return client.getTableColumnStatistics(dbName, tableName, getTableColNames(dbName, tableName)); + return client.getTableColumnStatistics(dbName, tableName, getTableColNames(dbName, tableName), Constants.HIVE_ENGINE); } /** @@ -500,7 +501,7 @@ public Table getTable(String dbName, String tableName) throws Exception { List colNames = new ArrayList(); client.getFields(dbName, tableName).forEach(fs -> colNames.add(fs.getName())); return client.getPartitionColumnStatistics(dbName, tableName, - client.listPartitionNames(dbName, tableName, (short) -1), colNames); + client.listPartitionNames(dbName, tableName, (short) -1), colNames, Constants.HIVE_ENGINE); } /** @@ -516,7 +517,7 @@ public Table getTable(String dbName, String tableName) throws Exception { String partName, List colNames) throws Exception { return client.getPartitionColumnStatistics(dbName, tableName, - Collections.singletonList(partName), colNames).get(0); + Collections.singletonList(partName), colNames, Constants.HIVE_ENGINE).get(0); } public List getAllPartitions(String dbName, String tableName) throws Exception { diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java index 61be5a3a5b..b87c0da75d 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java @@ -47,6 +47,7 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.cli.CliSessionState; import org.apache.hadoop.hive.common.ValidWriteIdList; +import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; @@ -408,7 +409,7 @@ private void testStatsAfterCompactionPartTbl(boolean newStreamingAPI) throws Exc //now make sure we get the stats we expect for partition we are going to add data to later Map> stats = 
msClient.getPartitionColumnStatistics(ci.dbname, - ci.tableName, Arrays.asList(ci.partName), colNames); + ci.tableName, Arrays.asList(ci.partName), colNames, Constants.HIVE_ENGINE); List colStats = stats.get(ci.partName); assertNotNull("No stats found for partition " + ci.partName, colStats); Assert.assertEquals("Expected column 'a' at index 0", "a", colStats.get(0).getColName()); @@ -426,7 +427,7 @@ private void testStatsAfterCompactionPartTbl(boolean newStreamingAPI) throws Exc //now save stats for partition we won't modify stats = msClient.getPartitionColumnStatistics(ciPart2.dbname, - ciPart2.tableName, Arrays.asList(ciPart2.partName), colNames); + ciPart2.tableName, Arrays.asList(ciPart2.partName), colNames, Constants.HIVE_ENGINE); colStats = stats.get(ciPart2.partName); LongColumnStatsData colAStatsPart2 = colStats.get(0).getStatsData().getLongStats(); StringColumnStatsData colBStatsPart2 = colStats.get(1).getStatsData().getStringStats(); @@ -498,7 +499,7 @@ private void testStatsAfterCompactionPartTbl(boolean newStreamingAPI) throws Exc Assert.assertEquals("ready for cleaning", compacts.get(0).getState()); stats = msClient.getPartitionColumnStatistics(ci.dbname, ci.tableName, - Arrays.asList(ci.partName), colNames); + Arrays.asList(ci.partName), colNames, Constants.HIVE_ENGINE); colStats = stats.get(ci.partName); assertNotNull("No stats found for partition " + ci.partName, colStats); Assert.assertEquals("Expected column 'a' at index 0", "a", colStats.get(0).getColName()); @@ -517,7 +518,7 @@ private void testStatsAfterCompactionPartTbl(boolean newStreamingAPI) throws Exc //now check that stats for partition we didn't modify did not change stats = msClient.getPartitionColumnStatistics(ciPart2.dbname, ciPart2.tableName, - Arrays.asList(ciPart2.partName), colNames); + Arrays.asList(ciPart2.partName), colNames, Constants.HIVE_ENGINE); colStats = stats.get(ciPart2.partName); Assert.assertEquals("Expected stats for " + ciPart2.partName + " to stay the same", colAStatsPart2, colStats.get(0).getStatsData().getLongStats()); diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/MetaStoreDumpUtility.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/MetaStoreDumpUtility.java index 2389c3bc68..fd22fbac99 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/MetaStoreDumpUtility.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/MetaStoreDumpUtility.java @@ -40,6 +40,7 @@ import java.util.stream.Stream; import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream; +import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hive.testutils.HiveTestEnvSetup; @@ -219,6 +220,9 @@ public int compare(String str1, String str2) { s.execute("ALTER TABLE APP.TAB_COL_STATS ADD COLUMN CAT_NAME VARCHAR(256)"); s.execute("update APP.TAB_COL_STATS set CAT_NAME = '" + Warehouse.DEFAULT_CATALOG_NAME + "'"); + s.execute("ALTER TABLE APP.TAB_COL_STATS ADD COLUMN ENGINE VARCHAR(128)"); + s.execute("update APP.TAB_COL_STATS set ENGINE = '" + Constants.HIVE_ENGINE + "'"); + s.close(); conn.close(); diff --git a/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql b/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql index cf3bfaa2b1..3e387d04eb 100644 --- a/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql +++ b/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql @@ -734,6 +734,7 @@ CREATE EXTERNAL TABLE IF NOT EXISTS `TAB_COL_STATS` 
( `NUM_TRUES` bigint, `NUM_FALSES` bigint, `LAST_ANALYZED` bigint, + `ENGINE` string, CONSTRAINT `SYS_PK_TAB_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE ) STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' @@ -759,7 +760,8 @@ TBLPROPERTIES ( \"MAX_COL_LEN\", \"NUM_TRUES\", \"NUM_FALSES\", - \"LAST_ANALYZED\" + \"LAST_ANALYZED\", + \"ENGINE\" FROM \"TAB_COL_STATS\"" ); @@ -785,6 +787,7 @@ CREATE EXTERNAL TABLE IF NOT EXISTS `PART_COL_STATS` ( `NUM_TRUES` bigint, `NUM_FALSES` bigint, `LAST_ANALYZED` bigint, + `ENGINE` string, CONSTRAINT `SYS_PK_PART_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE ) STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' @@ -811,7 +814,8 @@ TBLPROPERTIES ( \"MAX_COL_LEN\", \"NUM_TRUES\", \"NUM_FALSES\", - \"LAST_ANALYZED\" + \"LAST_ANALYZED\", + \"ENGINE\" FROM \"PART_COL_STATS\"" ); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableDesc.java index 1b6db58141..00eff3d56e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableDesc.java @@ -894,7 +894,7 @@ public Table toTable(HiveConf conf) throws HiveException { colStatsDesc.setCatName(tbl.getCatName()); colStatsDesc.setDbName(tbl.getDbName()); colStatsDesc.setTableName(tbl.getTableName()); - tbl.getTTable().setColStats(new ColumnStatistics(colStatsDesc, colStats.getStatsObj())); + tbl.getTTable().setColStats(new ColumnStatistics(colStatsDesc, colStats.getStatsObj(), colStats.getEngine())); // Statistics will have an associated write Id for a transactional table. We need it to // update column statistics. if (replWriteId > 0) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java index 97561911c2..10a2947b16 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.ValidReaderWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; +import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; @@ -282,6 +283,7 @@ private ColumnStatistics constructColumnStatsFromInput() ColumnStatistics colStat = new ColumnStatistics(); colStat.setStatsDesc(statsDesc); colStat.addToStatsObj(statsObj); + colStat.setEngine(Constants.HIVE_ENGINE); return colStat; } @@ -302,7 +304,7 @@ private ColumnStatisticsDesc getColumnStatsDesc(String dbName, private int persistColumnStats(Hive db) throws HiveException, MetaException, IOException { ColumnStatistics colStats = constructColumnStatsFromInput(); SetPartitionsStatsRequest request = - new SetPartitionsStatsRequest(Collections.singletonList(colStats)); + new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE); // Set writeId and validWriteId list for replicated statistics. getColStats() will return // non-null value only during replication. 
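The ColumnStatsUpdateTask hunk above shows the write-path convention introduced by this patch: the engine is stamped both on each ColumnStatistics object and on the enclosing SetPartitionsStatsRequest, whose constructor now takes it as a second argument. A condensed sketch of that pattern; the descriptor values and the stats object are placeholders, not taken from this patch:

```java
import java.util.Collections;

import org.apache.hadoop.hive.conf.Constants;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;

public final class EngineAwareWriteSketch {
  static SetPartitionsStatsRequest buildRequest(ColumnStatisticsObj statsObj) {
    // Table-level descriptor; database and table names are placeholders.
    ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, "default", "t");

    ColumnStatistics colStats = new ColumnStatistics();
    colStats.setStatsDesc(statsDesc);
    colStats.addToStatsObj(statsObj);
    colStats.setEngine(Constants.HIVE_ENGINE); // engine is now required on the struct

    // ...and the request constructor takes the engine again as its second argument.
    return new SetPartitionsStatsRequest(
        Collections.singletonList(colStats), Constants.HIVE_ENGINE);
  }
}
```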
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java index 64f9af3aba..6b9ea77101 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java @@ -201,7 +201,7 @@ private AlterTableAddPartitionDesc partitionDesc(Path fromPath, ColumnStatisticsDesc colStatsDesc = new ColumnStatisticsDesc(colStats.getStatsDesc()); colStatsDesc.setTableName(tblDesc.getTableName()); colStatsDesc.setDbName(tblDesc.getDatabaseName()); - partDesc.setColStats(new ColumnStatistics(colStatsDesc, colStats.getStatsObj())); + partDesc.setColStats(new ColumnStatistics(colStatsDesc, colStats.getStatsObj(), colStats.getEngine())); long writeId = replicationSpec().isMigratingToTxnTable() ? ReplUtils.REPL_BOOTSTRAP_MIGRATION_BASE_WRITE_ID : partition.getWriteId(); partDesc.setWriteId(writeId); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 691f3ee2e9..85daf5cea3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -92,6 +92,7 @@ import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable; import org.apache.hadoop.hive.common.log.InPlaceUpdate; +import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.io.HdfsUtils; @@ -1321,9 +1322,9 @@ public Table getTable(final String dbName, final String tableName, boolean throw dbName, tableName); } tTable = getMSC().getTable(getDefaultCatalog(conf), dbName, tableName, - validWriteIdList != null ? validWriteIdList.toString() : null, getColumnStats); + validWriteIdList != null ? 
validWriteIdList.toString() : null, getColumnStats, Constants.HIVE_ENGINE); } else { - tTable = getMSC().getTable(dbName, tableName, getColumnStats); + tTable = getMSC().getTable(dbName, tableName, getColumnStats, Constants.HIVE_ENGINE); } } catch (NoSuchObjectException e) { if (throwException) { @@ -3773,7 +3774,7 @@ public boolean dropPartition(String dbName, String tableName, List partV for (int i = 0; i < nBatches; ++i) { List tParts = getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(), - partNames.subList(i*batchSize, (i+1)*batchSize), getColStats); + partNames.subList(i*batchSize, (i+1)*batchSize), getColStats, Constants.HIVE_ENGINE); if (tParts != null) { for (org.apache.hadoop.hive.metastore.api.Partition tpart: tParts) { partitions.add(new Partition(tbl, tpart)); @@ -3784,7 +3785,7 @@ public boolean dropPartition(String dbName, String tableName, List partV if (nParts > nBatches * batchSize) { List tParts = getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(), - partNames.subList(nBatches*batchSize, nParts), getColStats); + partNames.subList(nBatches*batchSize, nParts), getColStats, Constants.HIVE_ENGINE); if (tParts != null) { for (org.apache.hadoop.hive.metastore.api.Partition tpart: tParts) { partitions.add(new Partition(tbl, tpart)); @@ -5166,10 +5167,10 @@ public boolean setPartitionColumnStatistics( if (checkTransactional) { Table tbl = getTable(dbName, tableName); AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl); - retv = getMSC().getTableColumnStatistics(dbName, tableName, colNames, + retv = getMSC().getTableColumnStatistics(dbName, tableName, colNames, Constants.HIVE_ENGINE, tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null); } else { - retv = getMSC().getTableColumnStatistics(dbName, tableName, colNames); + retv = getMSC().getTableColumnStatistics(dbName, tableName, colNames, Constants.HIVE_ENGINE); } return retv; } catch (Exception e) { @@ -5191,7 +5192,7 @@ public boolean setPartitionColumnStatistics( } return getMSC().getPartitionColumnStatistics( - dbName, tableName, partNames, colNames, writeIdList); + dbName, tableName, partNames, colNames, Constants.HIVE_ENGINE, writeIdList); } catch (Exception e) { LOG.debug(StringUtils.stringifyException(e)); throw new HiveException(e); @@ -5207,7 +5208,7 @@ public AggrStats getAggrColStatsFor(String dbName, String tblName, AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl); writeIdList = tableSnapshot != null ? 
tableSnapshot.getValidWriteIdList() : null; } - return getMSC().getAggrColStatsFor(dbName, tblName, colNames, partName, writeIdList); + return getMSC().getAggrColStatsFor(dbName, tblName, colNames, partName, Constants.HIVE_ENGINE, writeIdList); } catch (Exception e) { LOG.debug(StringUtils.stringifyException(e)); return new AggrStats(new ArrayList(),0); @@ -5217,7 +5218,7 @@ public AggrStats getAggrColStatsFor(String dbName, String tblName, public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws HiveException { try { - return getMSC().deleteTableColumnStatistics(dbName, tableName, colName); + return getMSC().deleteTableColumnStatistics(dbName, tableName, colName, Constants.HIVE_ENGINE); } catch(Exception e) { LOG.debug(StringUtils.stringifyException(e)); throw new HiveException(e); @@ -5227,7 +5228,7 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, String colName) throws HiveException { try { - return getMSC().deletePartitionColumnStatistics(dbName, tableName, partName, colName); + return getMSC().deletePartitionColumnStatistics(dbName, tableName, partName, colName, Constants.HIVE_ENGINE); } catch(Exception e) { LOG.debug(StringUtils.stringifyException(e)); throw new HiveException(e); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index d39a0b487f..a2c84b4620 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -181,12 +181,12 @@ public void truncateTable(String dbName, String tableName, @Override public org.apache.hadoop.hive.metastore.api.Table getTable(String dbname, String name) throws MetaException, TException, NoSuchObjectException { - return getTable(dbname, name, false); + return getTable(dbname, name, false, null); } @Override public org.apache.hadoop.hive.metastore.api.Table getTable(String dbname, String name, - boolean getColStats) throws MetaException, + boolean getColStats, String engine) throws MetaException, TException, NoSuchObjectException { // First check temp tables org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbname, name); @@ -194,7 +194,7 @@ public void truncateTable(String dbName, String tableName, return deepCopy(table); // Original method used deepCopy(), do the same here. } // Try underlying client - return super.getTable(MetaStoreUtils.getDefaultCatalog(conf), dbname, name, getColStats); + return super.getTable(MetaStoreUtils.getDefaultCatalog(conf), dbname, name, getColStats, engine); } // Need to override this one too or dropTable breaks because it doesn't find the table when checks @@ -202,19 +202,19 @@ public void truncateTable(String dbName, String tableName, @Override public org.apache.hadoop.hive.metastore.api.Table getTable(String catName, String dbName, String tableName) throws TException { - return getTable(catName, dbName, tableName, false); + return getTable(catName, dbName, tableName, false, null); } // Need to override this one too or dropTable breaks because it doesn't find the table when checks // before the drop. 
@Override public org.apache.hadoop.hive.metastore.api.Table getTable(String catName, String dbName, - String tableName, boolean getColStats) + String tableName, boolean getColStats, String engine) throws TException { if (!DEFAULT_CATALOG_NAME.equals(catName)) { - return super.getTable(catName, dbName, tableName, getColStats); + return super.getTable(catName, dbName, tableName, getColStats, engine); } else { - return getTable(dbName, tableName, getColStats); + return getTable(dbName, tableName, getColStats, engine); } } @@ -503,23 +503,23 @@ public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) /** {@inheritDoc} */ @Override public List getTableColumnStatistics(String dbName, String tableName, - List colNames) throws NoSuchObjectException, MetaException, TException, + List colNames, String engine) throws NoSuchObjectException, MetaException, TException, InvalidInputException, InvalidObjectException { if (getTempTable(dbName, tableName) != null) { return getTempTableColumnStats(dbName, tableName, colNames); } - return super.getTableColumnStatistics(dbName, tableName, colNames); + return super.getTableColumnStatistics(dbName, tableName, colNames, engine); } /** {@inheritDoc} */ @Override - public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) + public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName, String engine) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException { if (getTempTable(dbName, tableName) != null) { return deleteTempTableColumnStats(dbName, tableName, colName); } - return super.deleteTableColumnStatistics(dbName, tableName, colName); + return super.deleteTableColumnStatistics(dbName, tableName, colName, engine); } private void createTempTable(org.apache.hadoop.hive.metastore.api.Table tbl, @@ -1416,11 +1416,11 @@ public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, Stri @Override public List getPartitionsByNames(String catName, String dbName, String tblName, - List partNames, boolean getColStats) throws TException { + List partNames, boolean getColStats, String engine) throws TException { org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbName, tblName); if (table == null) { //(assume) not a temp table - Try underlying client - return super.getPartitionsByNames(catName, dbName, tblName, partNames, getColStats); + return super.getPartitionsByNames(catName, dbName, tblName, partNames, getColStats, engine); } TempTable tt = getPartitionedTempTable(table); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java index 22cb4e20e2..54de1d71fd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java @@ -20,6 +20,7 @@ import com.google.common.collect.Lists; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.type.HiveDecimal; +import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.DateColumnStatsData; @@ -471,7 +472,7 @@ else if (udaf instanceof GenericUDAFCount) { hive.getMSC().getTableColumnStatistics( tbl.getDbName(), tbl.getTableName(), Lists.newArrayList(colName), - tableSnapshot != null ? 
tableSnapshot.getValidWriteIdList() : null); + Constants.HIVE_ENGINE, tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null); if (stats.isEmpty()) { Logger.debug("No stats for " + tbl.getTableName() + " column " + colName); return null; @@ -532,7 +533,7 @@ else if (udaf instanceof GenericUDAFCount) { hive.getMSC().getTableColumnStatistics( tbl.getDbName(), tbl.getTableName(), Lists.newArrayList(colName), - tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null); + Constants.HIVE_ENGINE, tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null); if (stats.isEmpty()) { Logger.debug("No stats for " + tbl.getTableName() + " column " + colName); return null; @@ -675,7 +676,7 @@ else if (udaf instanceof GenericUDAFCount) { ColumnStatisticsData statData = hive.getMSC().getTableColumnStatistics( tbl.getDbName(), tbl.getTableName(), Lists.newArrayList(colName), - tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null) + Constants.HIVE_ENGINE, tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null) .get(0).getStatsData(); String name = colDesc.getTypeString().toUpperCase(); switch (type) { @@ -912,7 +913,7 @@ private ColumnStatisticsData validateSingleColStat(List sta Map> result = hive.getMSC().getPartitionColumnStatistics( tbl.getDbName(), tbl.getTableName(), partNames, Lists.newArrayList(colName), - tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null); + Constants.HIVE_ENGINE, tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null); if (result.size() != parts.size()) { Logger.debug("Received " + result.size() + " stats for " + parts.size() + " partitions"); return null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java index 2e25ecef65..1a339633d4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java @@ -23,6 +23,7 @@ import java.util.Collection; import java.util.List; +import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.Warehouse; @@ -145,6 +146,7 @@ public int process(Hive db, Table tbl) throws Exception { ColumnStatistics colStats = new ColumnStatistics(); colStats.setStatsDesc(statsDesc); colStats.setStatsObj(statsObjs); + colStats.setEngine(Constants.HIVE_ENGINE); stats.add(colStats); } } @@ -177,7 +179,7 @@ public int persistColumnStats(Hive db, Table tbl) throws HiveException, MetaExce if (colStats.isEmpty()) { return 0; } - SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(colStats); + SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(colStats, Constants.HIVE_ENGINE); request.setNeedMerge(colStatDesc.isNeedMerge()); HiveTxnManager txnMgr = AcidUtils.isTransactionalTable(tbl) ? 
SessionState.get().getTxnMgr() : null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java index 8acb1c54db..e8f382750d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.ValidReaderWriteIdList; +import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.MetaStoreThread; import org.apache.hadoop.hive.metastore.ObjectStore; @@ -444,7 +445,7 @@ private String buildPartColStr(Table table) { try { // Note: this should NOT do txn verification - we want to get outdated stats, to // see if we need to update anything. - existingStats = rs.getTableColumnStatistics(cat, db, tbl, allCols); + existingStats = rs.getTableColumnStatistics(cat, db, tbl, allCols, Constants.HIVE_ENGINE); } catch (NoSuchObjectException e) { LOG.error("Cannot retrieve existing stats, skipping " + fullTableName, e); return null; diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java index 4859222d20..339c273bc0 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java @@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.IMetaStoreClient; @@ -481,7 +482,7 @@ public void testTxnStatsOnOff() throws Exception { List stats; validWriteIds = msClient.getValidWriteIds("default." 
+ tableName).toString(); stats = msClient.getTableColumnStatistics( - "default", tableName, Lists.newArrayList("a"), validWriteIds); + "default", tableName, Lists.newArrayList("a"), Constants.HIVE_ENGINE, validWriteIds); return stats; } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java index 0db926acfb..f15c834a07 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java @@ -19,6 +19,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; @@ -784,7 +785,7 @@ public void testCompactStatsGather() throws Exception { List colNames = new ArrayList<>(); colNames.add("a"); Map> map = hms.getPartitionColumnStatistics("default", - "T", partNames, colNames); + "T", partNames, colNames, Constants.HIVE_ENGINE); Assert.assertEquals(4, map.get(partNames.get(0)).get(0).getStatsData().getLongStats().getHighValue()); @@ -831,7 +832,7 @@ public void testCompactStatsGather() throws Exception { Assert.assertTrue(resp.getCompacts().get(0).getHadoopJobId().startsWith("job_local")); //now check that stats were updated - map = hms.getPartitionColumnStatistics("default","T", partNames, colNames); + map = hms.getPartitionColumnStatistics("default","T", partNames, colNames, Constants.HIVE_ENGINE); Assert.assertEquals("", 5, map.get(partNames.get(0)).get(0).getStatsData().getLongStats().getHighValue()); } @Test diff --git a/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java b/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java index a2f8bab876..9a19e41941 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.ValidWriteIdList; +import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.IMetaStoreClient; @@ -263,7 +264,7 @@ public void testTxnPartitions() throws Exception { // We expect two partitions to be updated. Map> stats = msClient.getPartitionColumnStatistics( dbName, tblName, Lists.newArrayList("p=1", "p=2", "p=3"), - Lists.newArrayList("s"), currentWriteIds); + Lists.newArrayList("s"), Constants.HIVE_ENGINE, currentWriteIds); assertEquals(1, stats.size()); assertTrue(su.runOneIteration()); @@ -271,20 +272,20 @@ public void testTxnPartitions() throws Exception { // Analyze treats stats like data (new write ID), so stats still should not be valid. stats = msClient.getPartitionColumnStatistics( dbName, tblName, Lists.newArrayList("p=1", "p=2", "p=3"), - Lists.newArrayList("s"), currentWriteIds); + Lists.newArrayList("s"), Constants.HIVE_ENGINE, currentWriteIds); assertEquals(1, stats.size()); // Test with null list of partNames stats = msClient.getPartitionColumnStatistics( dbName, tblName, null, - Lists.newArrayList("s"), currentWriteIds); + Lists.newArrayList("s"), Constants.HIVE_ENGINE, currentWriteIds); assertEquals(0, stats.size()); // New reader. 
currentWriteIds = msClient.getValidWriteIds(fqName).toString(); stats = msClient.getPartitionColumnStatistics( dbName, tblName, Lists.newArrayList("p=1", "p=2", "p=3"), - Lists.newArrayList("s"), currentWriteIds); + Lists.newArrayList("s"), Constants.HIVE_ENGINE, currentWriteIds); assertEquals(3, stats.size()); msClient.close(); diff --git a/ql/src/test/results/clientpositive/llap/resourceplan.q.out b/ql/src/test/results/clientpositive/llap/resourceplan.q.out index bf79d75e8b..be8f1ec8b0 100644 --- a/ql/src/test/results/clientpositive/llap/resourceplan.q.out +++ b/ql/src/test/results/clientpositive/llap/resourceplan.q.out @@ -1646,6 +1646,7 @@ PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TAB_COL_STATS` ( `NUM_TRUES` bigint, `NUM_FALSES` bigint, `LAST_ANALYZED` bigint, + `ENGINE` int, CONSTRAINT `SYS_PK_TAB_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE ) STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' @@ -1671,7 +1672,8 @@ TBLPROPERTIES ( \"MAX_COL_LEN\", \"NUM_TRUES\", \"NUM_FALSES\", - \"LAST_ANALYZED\" + \"LAST_ANALYZED\", + \"ENGINE\" FROM \"TAB_COL_STATS\"" ) @@ -1698,6 +1700,7 @@ POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TAB_COL_STATS` ( `NUM_TRUES` bigint, `NUM_FALSES` bigint, `LAST_ANALYZED` bigint, + `ENGINE` int, CONSTRAINT `SYS_PK_TAB_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE ) STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' @@ -1723,7 +1726,8 @@ TBLPROPERTIES ( \"MAX_COL_LEN\", \"NUM_TRUES\", \"NUM_FALSES\", - \"LAST_ANALYZED\" + \"LAST_ANALYZED\", + \"ENGINE\" FROM \"TAB_COL_STATS\"" ) @@ -1751,6 +1755,7 @@ PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PART_COL_STATS` ( `NUM_TRUES` bigint, `NUM_FALSES` bigint, `LAST_ANALYZED` bigint, + `ENGINE` int, CONSTRAINT `SYS_PK_PART_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE ) STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' @@ -1777,7 +1782,8 @@ TBLPROPERTIES ( \"MAX_COL_LEN\", \"NUM_TRUES\", \"NUM_FALSES\", - \"LAST_ANALYZED\" + \"LAST_ANALYZED\", + \"ENGINE\" FROM \"PART_COL_STATS\"" ) @@ -1805,6 +1811,7 @@ POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PART_COL_STATS` ( `NUM_TRUES` bigint, `NUM_FALSES` bigint, `LAST_ANALYZED` bigint, + `ENGINE` int, CONSTRAINT `SYS_PK_PART_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE ) STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' @@ -1831,7 +1838,8 @@ TBLPROPERTIES ( \"MAX_COL_LEN\", \"NUM_TRUES\", \"NUM_FALSES\", - \"LAST_ANALYZED\" + \"LAST_ANALYZED\", + \"ENGINE\" FROM \"PART_COL_STATS\"" ) diff --git a/ql/src/test/results/clientpositive/llap/strict_managed_tables_sysdb.q.out b/ql/src/test/results/clientpositive/llap/strict_managed_tables_sysdb.q.out index 6d874581bd..3f9b28e556 100644 --- a/ql/src/test/results/clientpositive/llap/strict_managed_tables_sysdb.q.out +++ b/ql/src/test/results/clientpositive/llap/strict_managed_tables_sysdb.q.out @@ -1736,6 +1736,7 @@ PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TAB_COL_STATS` ( `NUM_TRUES` bigint, `NUM_FALSES` bigint, `LAST_ANALYZED` bigint, + `ENGINE` int, CONSTRAINT `SYS_PK_TAB_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE ) STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' @@ -1761,7 +1762,8 @@ TBLPROPERTIES ( \"MAX_COL_LEN\", \"NUM_TRUES\", \"NUM_FALSES\", - \"LAST_ANALYZED\" + \"LAST_ANALYZED\", + \"ENGINE\" FROM \"TAB_COL_STATS\"" ) @@ -1788,6 +1790,7 @@ POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TAB_COL_STATS` ( `NUM_TRUES` bigint, `NUM_FALSES` bigint, `LAST_ANALYZED` bigint, + `ENGINE` int, CONSTRAINT `SYS_PK_TAB_COL_STATS` PRIMARY KEY (`CS_ID`) 
DISABLE ) STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' @@ -1813,7 +1816,8 @@ TBLPROPERTIES ( \"MAX_COL_LEN\", \"NUM_TRUES\", \"NUM_FALSES\", - \"LAST_ANALYZED\" + \"LAST_ANALYZED\", + \"ENGINE\" FROM \"TAB_COL_STATS\"" ) @@ -1841,6 +1845,7 @@ PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PART_COL_STATS` ( `NUM_TRUES` bigint, `NUM_FALSES` bigint, `LAST_ANALYZED` bigint, + `ENGINE` int, CONSTRAINT `SYS_PK_PART_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE ) STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' @@ -1867,7 +1872,8 @@ TBLPROPERTIES ( \"MAX_COL_LEN\", \"NUM_TRUES\", \"NUM_FALSES\", - \"LAST_ANALYZED\" + \"LAST_ANALYZED\", + \"ENGINE\" FROM \"PART_COL_STATS\"" ) @@ -1895,6 +1901,7 @@ POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PART_COL_STATS` ( `NUM_TRUES` bigint, `NUM_FALSES` bigint, `LAST_ANALYZED` bigint, + `ENGINE` int, CONSTRAINT `SYS_PK_PART_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE ) STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' @@ -1921,7 +1928,8 @@ TBLPROPERTIES ( \"MAX_COL_LEN\", \"NUM_TRUES\", \"NUM_FALSES\", - \"LAST_ANALYZED\" + \"LAST_ANALYZED\", + \"ENGINE\" FROM \"PART_COL_STATS\"" ) diff --git a/ql/src/test/results/clientpositive/llap/sysdb.q.out b/ql/src/test/results/clientpositive/llap/sysdb.q.out index db15a2a27e..a22789fb69 100644 --- a/ql/src/test/results/clientpositive/llap/sysdb.q.out +++ b/ql/src/test/results/clientpositive/llap/sysdb.q.out @@ -1736,6 +1736,7 @@ PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TAB_COL_STATS` ( `NUM_TRUES` bigint, `NUM_FALSES` bigint, `LAST_ANALYZED` bigint, + `ENGINE` int, CONSTRAINT `SYS_PK_TAB_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE ) STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' @@ -1761,7 +1762,8 @@ TBLPROPERTIES ( \"MAX_COL_LEN\", \"NUM_TRUES\", \"NUM_FALSES\", - \"LAST_ANALYZED\" + \"LAST_ANALYZED\", + \"ENGINE\" FROM \"TAB_COL_STATS\"" ) @@ -1788,6 +1790,7 @@ POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `TAB_COL_STATS` ( `NUM_TRUES` bigint, `NUM_FALSES` bigint, `LAST_ANALYZED` bigint, + `ENGINE` int, CONSTRAINT `SYS_PK_TAB_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE ) STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' @@ -1813,7 +1816,8 @@ TBLPROPERTIES ( \"MAX_COL_LEN\", \"NUM_TRUES\", \"NUM_FALSES\", - \"LAST_ANALYZED\" + \"LAST_ANALYZED\", + \"ENGINE\" FROM \"TAB_COL_STATS\"" ) @@ -1841,6 +1845,7 @@ PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PART_COL_STATS` ( `NUM_TRUES` bigint, `NUM_FALSES` bigint, `LAST_ANALYZED` bigint, + `ENGINE` int, CONSTRAINT `SYS_PK_PART_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE ) STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' @@ -1867,7 +1872,8 @@ TBLPROPERTIES ( \"MAX_COL_LEN\", \"NUM_TRUES\", \"NUM_FALSES\", - \"LAST_ANALYZED\" + \"LAST_ANALYZED\", + \"ENGINE\" FROM \"PART_COL_STATS\"" ) @@ -1895,6 +1901,7 @@ POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS `PART_COL_STATS` ( `NUM_TRUES` bigint, `NUM_FALSES` bigint, `LAST_ANALYZED` bigint, + `ENGINE` int, CONSTRAINT `SYS_PK_PART_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE ) STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' @@ -1921,7 +1928,8 @@ TBLPROPERTIES ( \"MAX_COL_LEN\", \"NUM_TRUES\", \"NUM_FALSES\", - \"LAST_ANALYZED\" + \"LAST_ANALYZED\", + \"ENGINE\" FROM \"PART_COL_STATS\"" ) @@ -3949,6 +3957,7 @@ part_col_stats cs_id part_col_stats db_name part_col_stats double_high_value part_col_stats double_low_value +part_col_stats engine part_col_stats last_analyzed part_col_stats long_high_value part_col_stats 
long_low_value @@ -4091,6 +4100,7 @@ tab_col_stats cs_id tab_col_stats db_name tab_col_stats double_high_value tab_col_stats double_low_value +tab_col_stats engine tab_col_stats last_analyzed tab_col_stats long_high_value tab_col_stats long_low_value @@ -4723,6 +4733,7 @@ max_col_len bigint from deserializer num_trues bigint from deserializer num_falses bigint from deserializer last_analyzed bigint from deserializer +engine int from deserializer PREHOOK: query: explain select max(num_distincts) from sys.tab_col_stats PREHOOK: type: QUERY PREHOOK: Input: sys@tab_col_stats diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java index fd4619f0e5..6a4b45e4ae 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java @@ -41,6 +41,7 @@ private static final org.apache.thrift.protocol.TField STATS_DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("statsDesc", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField STATS_OBJ_FIELD_DESC = new org.apache.thrift.protocol.TField("statsObj", org.apache.thrift.protocol.TType.LIST, (short)2); private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)3); + private static final org.apache.thrift.protocol.TField ENGINE_FIELD_DESC = new org.apache.thrift.protocol.TField("engine", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -51,12 +52,14 @@ private ColumnStatisticsDesc statsDesc; // required private List statsObj; // required private boolean isStatsCompliant; // optional + private String engine; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { STATS_DESC((short)1, "statsDesc"), STATS_OBJ((short)2, "statsObj"), - IS_STATS_COMPLIANT((short)3, "isStatsCompliant"); + IS_STATS_COMPLIANT((short)3, "isStatsCompliant"), + ENGINE((short)4, "engine"); private static final Map byName = new HashMap(); @@ -77,6 +80,8 @@ public static _Fields findByThriftId(int fieldId) { return STATS_OBJ; case 3: // IS_STATS_COMPLIANT return IS_STATS_COMPLIANT; + case 4: // ENGINE + return ENGINE; default: return null; } @@ -130,6 +135,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class)))); tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.ENGINE, new org.apache.thrift.meta_data.FieldMetaData("engine", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnStatistics.class, metaDataMap); } @@ -139,11 +146,13 @@ public ColumnStatistics() { public ColumnStatistics( ColumnStatisticsDesc statsDesc, - List statsObj) + List statsObj, + String engine) { this(); this.statsDesc = statsDesc; this.statsObj = statsObj; + this.engine = engine; } /** @@ -162,6 +171,9 @@ public ColumnStatistics(ColumnStatistics other) { this.statsObj = __this__statsObj; } this.isStatsCompliant = other.isStatsCompliant; + if (other.isSetEngine()) { + this.engine = other.engine; + } } public ColumnStatistics deepCopy() { @@ -174,6 +186,7 @@ public void clear() { this.statsObj = null; setIsStatsCompliantIsSet(false); this.isStatsCompliant = false; + this.engine = null; } public ColumnStatisticsDesc getStatsDesc() { @@ -259,6 +272,29 @@ public void setIsStatsCompliantIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value); } + public String getEngine() { + return this.engine; + } + + public void setEngine(String engine) { + this.engine = engine; + } + + public void unsetEngine() { + this.engine = null; + } + + /** Returns true if field engine is set (has been assigned a value) and false otherwise */ + public boolean isSetEngine() { + return this.engine != null; + } + + public void setEngineIsSet(boolean value) { + if (!value) { + this.engine = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case STATS_DESC: @@ -285,6 +321,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case ENGINE: + if (value == null) { + unsetEngine(); + } else { + setEngine((String)value); + } + break; + } } @@ -299,6 +343,9 @@ public Object getFieldValue(_Fields field) { case IS_STATS_COMPLIANT: return isIsStatsCompliant(); + case ENGINE: + return getEngine(); + } throw new IllegalStateException(); } @@ -316,6 +363,8 @@ public boolean isSet(_Fields field) { return isSetStatsObj(); case IS_STATS_COMPLIANT: return isSetIsStatsCompliant(); + case ENGINE: + return isSetEngine(); } throw new IllegalStateException(); } @@ -360,6 +409,15 @@ public boolean equals(ColumnStatistics that) { return false; } + boolean this_present_engine = true && this.isSetEngine(); + boolean that_present_engine = true && 
that.isSetEngine(); + if (this_present_engine || that_present_engine) { + if (!(this_present_engine && that_present_engine)) + return false; + if (!this.engine.equals(that.engine)) + return false; + } + return true; } @@ -382,6 +440,11 @@ public int hashCode() { if (present_isStatsCompliant) list.add(isStatsCompliant); + boolean present_engine = true && (isSetEngine()); + list.add(present_engine); + if (present_engine) + list.add(engine); + return list.hashCode(); } @@ -423,6 +486,16 @@ public int compareTo(ColumnStatistics other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetEngine()).compareTo(other.isSetEngine()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEngine()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.engine, other.engine); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -464,6 +537,14 @@ public String toString() { sb.append(this.isStatsCompliant); first = false; } + if (!first) sb.append(", "); + sb.append("engine:"); + if (this.engine == null) { + sb.append("null"); + } else { + sb.append(this.engine); + } + first = false; sb.append(")"); return sb.toString(); } @@ -478,6 +559,10 @@ public void validate() throws org.apache.thrift.TException { throw new org.apache.thrift.protocol.TProtocolException("Required field 'statsObj' is unset! Struct:" + toString()); } + if (!isSetEngine()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'engine' is unset! Struct:" + toString()); + } + // check for sub-struct validity if (statsDesc != null) { statsDesc.validate(); @@ -556,6 +641,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnStatistics st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // ENGINE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.engine = iprot.readString(); + struct.setEngineIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -591,6 +684,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnStatistics s oprot.writeBool(struct.isStatsCompliant); oprot.writeFieldEnd(); } + if (struct.engine != null) { + oprot.writeFieldBegin(ENGINE_FIELD_DESC); + oprot.writeString(struct.engine); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -616,6 +714,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ColumnStatistics st _iter280.write(oprot); } } + oprot.writeString(struct.engine); BitSet optionals = new BitSet(); if (struct.isSetIsStatsCompliant()) { optionals.set(0); @@ -644,6 +743,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ColumnStatistics str } } struct.setStatsObjIsSet(true); + struct.engine = iprot.readString(); + struct.setEngineIsSet(true); BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { struct.isStatsCompliant = iprot.readBool(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java index 9924f20fd4..f41d322ea0 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java +++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java @@ -44,6 +44,7 @@ private static final org.apache.thrift.protocol.TField GET_COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("get_col_stats", org.apache.thrift.protocol.TType.BOOL, (short)4); private static final org.apache.thrift.protocol.TField PROCESSOR_CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("processorCapabilities", org.apache.thrift.protocol.TType.LIST, (short)5); private static final org.apache.thrift.protocol.TField PROCESSOR_IDENTIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("processorIdentifier", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField ENGINE_FIELD_DESC = new org.apache.thrift.protocol.TField("engine", org.apache.thrift.protocol.TType.STRING, (short)7); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -57,6 +58,7 @@ private boolean get_col_stats; // optional private List processorCapabilities; // optional private String processorIdentifier; // optional + private String engine; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -65,7 +67,8 @@ NAMES((short)3, "names"), GET_COL_STATS((short)4, "get_col_stats"), PROCESSOR_CAPABILITIES((short)5, "processorCapabilities"), - PROCESSOR_IDENTIFIER((short)6, "processorIdentifier"); + PROCESSOR_IDENTIFIER((short)6, "processorIdentifier"), + ENGINE((short)7, "engine"); private static final Map byName = new HashMap(); @@ -92,6 +95,8 @@ public static _Fields findByThriftId(int fieldId) { return PROCESSOR_CAPABILITIES; case 6: // PROCESSOR_IDENTIFIER return PROCESSOR_IDENTIFIER; + case 7: // ENGINE + return ENGINE; default: return null; } @@ -134,7 +139,7 @@ public String getFieldName() { // isset id assignments private static final int __GET_COL_STATS_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.NAMES,_Fields.GET_COL_STATS,_Fields.PROCESSOR_CAPABILITIES,_Fields.PROCESSOR_IDENTIFIER}; + private static final _Fields optionals[] = {_Fields.NAMES,_Fields.GET_COL_STATS,_Fields.PROCESSOR_CAPABILITIES,_Fields.PROCESSOR_IDENTIFIER,_Fields.ENGINE}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -152,6 +157,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.PROCESSOR_IDENTIFIER, new org.apache.thrift.meta_data.FieldMetaData("processorIdentifier", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.ENGINE, new org.apache.thrift.meta_data.FieldMetaData("engine", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetPartitionsByNamesRequest.class, metaDataMap); } @@ -191,6 +198,9 @@ public GetPartitionsByNamesRequest(GetPartitionsByNamesRequest other) { if 
(other.isSetProcessorIdentifier()) { this.processorIdentifier = other.processorIdentifier; } + if (other.isSetEngine()) { + this.engine = other.engine; + } } public GetPartitionsByNamesRequest deepCopy() { @@ -206,6 +216,7 @@ public void clear() { this.get_col_stats = false; this.processorCapabilities = null; this.processorIdentifier = null; + this.engine = null; } public String getDb_name() { @@ -375,6 +386,29 @@ public void setProcessorIdentifierIsSet(boolean value) { } } + public String getEngine() { + return this.engine; + } + + public void setEngine(String engine) { + this.engine = engine; + } + + public void unsetEngine() { + this.engine = null; + } + + /** Returns true if field engine is set (has been assigned a value) and false otherwise */ + public boolean isSetEngine() { + return this.engine != null; + } + + public void setEngineIsSet(boolean value) { + if (!value) { + this.engine = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -425,6 +459,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case ENGINE: + if (value == null) { + unsetEngine(); + } else { + setEngine((String)value); + } + break; + } } @@ -448,6 +490,9 @@ public Object getFieldValue(_Fields field) { case PROCESSOR_IDENTIFIER: return getProcessorIdentifier(); + case ENGINE: + return getEngine(); + } throw new IllegalStateException(); } @@ -471,6 +516,8 @@ public boolean isSet(_Fields field) { return isSetProcessorCapabilities(); case PROCESSOR_IDENTIFIER: return isSetProcessorIdentifier(); + case ENGINE: + return isSetEngine(); } throw new IllegalStateException(); } @@ -542,6 +589,15 @@ public boolean equals(GetPartitionsByNamesRequest that) { return false; } + boolean this_present_engine = true && this.isSetEngine(); + boolean that_present_engine = true && that.isSetEngine(); + if (this_present_engine || that_present_engine) { + if (!(this_present_engine && that_present_engine)) + return false; + if (!this.engine.equals(that.engine)) + return false; + } + return true; } @@ -579,6 +635,11 @@ public int hashCode() { if (present_processorIdentifier) list.add(processorIdentifier); + boolean present_engine = true && (isSetEngine()); + list.add(present_engine); + if (present_engine) + list.add(engine); + return list.hashCode(); } @@ -650,6 +711,16 @@ public int compareTo(GetPartitionsByNamesRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetEngine()).compareTo(other.isSetEngine()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEngine()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.engine, other.engine); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -721,6 +792,16 @@ public String toString() { } first = false; } + if (isSetEngine()) { + if (!first) sb.append(", "); + sb.append("engine:"); + if (this.engine == null) { + sb.append("null"); + } else { + sb.append(this.engine); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -842,6 +923,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsByName org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // ENGINE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.engine = iprot.readString(); + struct.setEngineIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); } @@ -905,6 +994,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsByNam oprot.writeFieldEnd(); } } + if (struct.engine != null) { + if (struct.isSetEngine()) { + oprot.writeFieldBegin(ENGINE_FIELD_DESC); + oprot.writeString(struct.engine); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -937,7 +1033,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsByName if (struct.isSetProcessorIdentifier()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetEngine()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); @@ -962,6 +1061,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsByName if (struct.isSetProcessorIdentifier()) { oprot.writeString(struct.processorIdentifier); } + if (struct.isSetEngine()) { + oprot.writeString(struct.engine); + } } @Override @@ -971,7 +1073,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsByNames struct.setDb_nameIsSet(true); struct.tbl_name = iprot.readString(); struct.setTbl_nameIsSet(true); - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list572 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); @@ -1006,6 +1108,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsByNames struct.processorIdentifier = iprot.readString(); struct.setProcessorIdentifierIsSet(true); } + if (incoming.get(4)) { + struct.engine = iprot.readString(); + struct.setEngineIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java index f751e397f5..18d25a1c58 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java @@ -46,6 +46,7 @@ private static final org.apache.thrift.protocol.TField GET_COLUMN_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("getColumnStats", org.apache.thrift.protocol.TType.BOOL, (short)7); private static final org.apache.thrift.protocol.TField PROCESSOR_CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("processorCapabilities", org.apache.thrift.protocol.TType.LIST, (short)8); private static final org.apache.thrift.protocol.TField PROCESSOR_IDENTIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("processorIdentifier", org.apache.thrift.protocol.TType.STRING, (short)9); + private static final org.apache.thrift.protocol.TField ENGINE_FIELD_DESC = new org.apache.thrift.protocol.TField("engine", org.apache.thrift.protocol.TType.STRING, (short)10); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -61,6 +62,7 @@ private boolean getColumnStats; // optional private List processorCapabilities; // optional private String processorIdentifier; // optional + private String engine; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
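On the fetch path, by contrast, engine rides along as an optional field: GetPartitionsByNamesRequest above and GetTableRequest below both leave it unset unless the caller wants engine-scoped column statistics. A hedged sketch of a call site using the generated setters (the getColumnStats flag already existed in this struct; table names are illustrative):

    import org.apache.hadoop.hive.metastore.api.GetTableRequest;

    public class EngineScopedGetTable {
      public static GetTableRequest request(String db, String tbl) {
        GetTableRequest req = new GetTableRequest();
        req.setDbName(db);
        req.setTblName(tbl);
        req.setGetColumnStats(true); // ask the metastore to attach column stats
        req.setEngine("hive");       // optional field 10: only stats computed by this engine
        return req;
      }
    }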
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -71,7 +73,8 @@ VALID_WRITE_ID_LIST((short)6, "validWriteIdList"), GET_COLUMN_STATS((short)7, "getColumnStats"), PROCESSOR_CAPABILITIES((short)8, "processorCapabilities"), - PROCESSOR_IDENTIFIER((short)9, "processorIdentifier"); + PROCESSOR_IDENTIFIER((short)9, "processorIdentifier"), + ENGINE((short)10, "engine"); private static final Map byName = new HashMap(); @@ -102,6 +105,8 @@ public static _Fields findByThriftId(int fieldId) { return PROCESSOR_CAPABILITIES; case 9: // PROCESSOR_IDENTIFIER return PROCESSOR_IDENTIFIER; + case 10: // ENGINE + return ENGINE; default: return null; } @@ -144,7 +149,7 @@ public String getFieldName() { // isset id assignments private static final int __GETCOLUMNSTATS_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.CAPABILITIES,_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST,_Fields.GET_COLUMN_STATS,_Fields.PROCESSOR_CAPABILITIES,_Fields.PROCESSOR_IDENTIFIER}; + private static final _Fields optionals[] = {_Fields.CAPABILITIES,_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST,_Fields.GET_COLUMN_STATS,_Fields.PROCESSOR_CAPABILITIES,_Fields.PROCESSOR_IDENTIFIER,_Fields.ENGINE}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -165,6 +170,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.PROCESSOR_IDENTIFIER, new org.apache.thrift.meta_data.FieldMetaData("processorIdentifier", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.ENGINE, new org.apache.thrift.meta_data.FieldMetaData("engine", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableRequest.class, metaDataMap); } @@ -209,6 +216,9 @@ public GetTableRequest(GetTableRequest other) { if (other.isSetProcessorIdentifier()) { this.processorIdentifier = other.processorIdentifier; } + if (other.isSetEngine()) { + this.engine = other.engine; + } } public GetTableRequest deepCopy() { @@ -226,6 +236,7 @@ public void clear() { this.getColumnStats = false; this.processorCapabilities = null; this.processorIdentifier = null; + this.engine = null; } public String getDbName() { @@ -426,6 +437,29 @@ public void setProcessorIdentifierIsSet(boolean value) { } } + public String getEngine() { + return this.engine; + } + + public void setEngine(String engine) { + this.engine = engine; + } + + public void unsetEngine() { + this.engine = null; + } + + /** Returns true if field engine is set (has been assigned a value) and false otherwise */ + public boolean isSetEngine() { + return this.engine != null; + } + + public void setEngineIsSet(boolean value) { + if (!value) { + this.engine = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -492,6 +526,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case ENGINE: + if (value == null) { + unsetEngine(); + } else { + setEngine((String)value); + } + break; + } } @@ -521,6 
+563,9 @@ public Object getFieldValue(_Fields field) { case PROCESSOR_IDENTIFIER: return getProcessorIdentifier(); + case ENGINE: + return getEngine(); + } throw new IllegalStateException(); } @@ -548,6 +593,8 @@ public boolean isSet(_Fields field) { return isSetProcessorCapabilities(); case PROCESSOR_IDENTIFIER: return isSetProcessorIdentifier(); + case ENGINE: + return isSetEngine(); } throw new IllegalStateException(); } @@ -637,6 +684,15 @@ public boolean equals(GetTableRequest that) { return false; } + boolean this_present_engine = true && this.isSetEngine(); + boolean that_present_engine = true && that.isSetEngine(); + if (this_present_engine || that_present_engine) { + if (!(this_present_engine && that_present_engine)) + return false; + if (!this.engine.equals(that.engine)) + return false; + } + return true; } @@ -684,6 +740,11 @@ public int hashCode() { if (present_processorIdentifier) list.add(processorIdentifier); + boolean present_engine = true && (isSetEngine()); + list.add(present_engine); + if (present_engine) + list.add(engine); + return list.hashCode(); } @@ -775,6 +836,16 @@ public int compareTo(GetTableRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetEngine()).compareTo(other.isSetEngine()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEngine()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.engine, other.engine); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -866,6 +937,16 @@ public String toString() { } first = false; } + if (isSetEngine()) { + if (!first) sb.append(", "); + sb.append("engine:"); + if (this.engine == null) { + sb.append("null"); + } else { + sb.append(this.engine); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -997,6 +1078,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTableRequest str org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 10: // ENGINE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.engine = iprot.readString(); + struct.setEngineIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1067,6 +1156,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTableRequest st oprot.writeFieldEnd(); } } + if (struct.engine != null) { + if (struct.isSetEngine()) { + oprot.writeFieldBegin(ENGINE_FIELD_DESC); + oprot.writeString(struct.engine); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1105,7 +1201,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTableRequest str if (struct.isSetProcessorIdentifier()) { optionals.set(5); } - oprot.writeBitSet(optionals, 6); + if (struct.isSetEngine()) { + optionals.set(6); + } + oprot.writeBitSet(optionals, 7); if (struct.isSetCapabilities()) { struct.capabilities.write(oprot); } @@ -1130,6 +1229,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTableRequest str if (struct.isSetProcessorIdentifier()) { oprot.writeString(struct.processorIdentifier); } + if (struct.isSetEngine()) { + oprot.writeString(struct.engine); + } } @Override @@ -1139,7 +1241,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTableRequest stru struct.setDbNameIsSet(true); struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); - BitSet incoming = 
iprot.readBitSet(6); + BitSet incoming = iprot.readBitSet(7); if (incoming.get(0)) { struct.capabilities = new ClientCapabilities(); struct.capabilities.read(iprot); @@ -1174,6 +1276,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTableRequest stru struct.processorIdentifier = iprot.readString(); struct.setProcessorIdentifierIsSet(true); } + if (incoming.get(6)) { + struct.engine = iprot.readString(); + struct.setEngineIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java index 54beb0e1c9..8dec4e8a68 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java @@ -44,6 +44,7 @@ private static final org.apache.thrift.protocol.TField PART_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partNames", org.apache.thrift.protocol.TType.LIST, (short)4); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)5); private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField ENGINE_FIELD_DESC = new org.apache.thrift.protocol.TField("engine", org.apache.thrift.protocol.TType.STRING, (short)7); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -57,6 +58,7 @@ private List partNames; // required private String catName; // optional private String validWriteIdList; // optional + private String engine; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
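PartitionsStatsRequest below takes the strict form: engine is required, and the convenience constructor grows a fifth argument. A sketch of the updated call site, with illustrative database, table, column, and partition names:

    import java.util.Arrays;
    import java.util.Collections;
    import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;

    public class EnginePartitionStats {
      public static PartitionsStatsRequest request() {
        return new PartitionsStatsRequest(
            "default", "web_logs",
            Arrays.asList("id", "url"),                  // colNames
            Collections.singletonList("ds=2020-01-01"), // partNames
            "hive");                                     // engine, now required
      }
    }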
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -65,7 +67,8 @@ COL_NAMES((short)3, "colNames"), PART_NAMES((short)4, "partNames"), CAT_NAME((short)5, "catName"), - VALID_WRITE_ID_LIST((short)6, "validWriteIdList"); + VALID_WRITE_ID_LIST((short)6, "validWriteIdList"), + ENGINE((short)7, "engine"); private static final Map byName = new HashMap(); @@ -92,6 +95,8 @@ public static _Fields findByThriftId(int fieldId) { return CAT_NAME; case 6: // VALID_WRITE_ID_LIST return VALID_WRITE_ID_LIST; + case 7: // ENGINE + return ENGINE; default: return null; } @@ -150,6 +155,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.ENGINE, new org.apache.thrift.meta_data.FieldMetaData("engine", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsStatsRequest.class, metaDataMap); } @@ -161,13 +168,15 @@ public PartitionsStatsRequest( String dbName, String tblName, List colNames, - List partNames) + List partNames, + String engine) { this(); this.dbName = dbName; this.tblName = tblName; this.colNames = colNames; this.partNames = partNames; + this.engine = engine; } /** @@ -194,6 +203,9 @@ public PartitionsStatsRequest(PartitionsStatsRequest other) { if (other.isSetValidWriteIdList()) { this.validWriteIdList = other.validWriteIdList; } + if (other.isSetEngine()) { + this.engine = other.engine; + } } public PartitionsStatsRequest deepCopy() { @@ -208,6 +220,7 @@ public void clear() { this.partNames = null; this.catName = null; this.validWriteIdList = null; + this.engine = null; } public String getDbName() { @@ -378,6 +391,29 @@ public void setValidWriteIdListIsSet(boolean value) { } } + public String getEngine() { + return this.engine; + } + + public void setEngine(String engine) { + this.engine = engine; + } + + public void unsetEngine() { + this.engine = null; + } + + /** Returns true if field engine is set (has been assigned a value) and false otherwise */ + public boolean isSetEngine() { + return this.engine != null; + } + + public void setEngineIsSet(boolean value) { + if (!value) { + this.engine = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -428,6 +464,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case ENGINE: + if (value == null) { + unsetEngine(); + } else { + setEngine((String)value); + } + break; + } } @@ -451,6 +495,9 @@ public Object getFieldValue(_Fields field) { case VALID_WRITE_ID_LIST: return getValidWriteIdList(); + case ENGINE: + return getEngine(); + } throw new IllegalStateException(); } @@ -474,6 +521,8 @@ public boolean isSet(_Fields field) { return isSetCatName(); case VALID_WRITE_ID_LIST: return isSetValidWriteIdList(); + case ENGINE: + return isSetEngine(); } throw new IllegalStateException(); } @@ -545,6 +594,15 @@ public boolean equals(PartitionsStatsRequest that) { return false; } + boolean this_present_engine = true && this.isSetEngine(); + boolean that_present_engine = true && that.isSetEngine(); + if 
(this_present_engine || that_present_engine) { + if (!(this_present_engine && that_present_engine)) + return false; + if (!this.engine.equals(that.engine)) + return false; + } + return true; } @@ -582,6 +640,11 @@ public int hashCode() { if (present_validWriteIdList) list.add(validWriteIdList); + boolean present_engine = true && (isSetEngine()); + list.add(present_engine); + if (present_engine) + list.add(engine); + return list.hashCode(); } @@ -653,6 +716,16 @@ public int compareTo(PartitionsStatsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetEngine()).compareTo(other.isSetEngine()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEngine()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.engine, other.engine); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -724,6 +797,14 @@ public String toString() { } first = false; } + if (!first) sb.append(", "); + sb.append("engine:"); + if (this.engine == null) { + sb.append("null"); + } else { + sb.append(this.engine); + } + first = false; sb.append(")"); return sb.toString(); } @@ -746,6 +827,10 @@ public void validate() throws org.apache.thrift.TException { throw new org.apache.thrift.protocol.TProtocolException("Required field 'partNames' is unset! Struct:" + toString()); } + if (!isSetEngine()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'engine' is unset! Struct:" + toString()); + } + // check for sub-struct validity } @@ -851,6 +936,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsRequ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // ENGINE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.engine = iprot.readString(); + struct.setEngineIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -912,6 +1005,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsReq oprot.writeFieldEnd(); } } + if (struct.engine != null) { + oprot.writeFieldBegin(ENGINE_FIELD_DESC); + oprot.writeString(struct.engine); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -945,6 +1043,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsRequ oprot.writeString(_iter483); } } + oprot.writeString(struct.engine); BitSet optionals = new BitSet(); if (struct.isSetCatName()) { optionals.set(0); @@ -990,6 +1089,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsReque } } struct.setPartNamesIsSet(true); + struct.engine = iprot.readString(); + struct.setEngineIsSet(true); BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { struct.catName = iprot.readString(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java index 8009eb451c..612c5c14da 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java @@ -42,6 +42,7 @@ private 
static final org.apache.thrift.protocol.TField NEED_MERGE_FIELD_DESC = new org.apache.thrift.protocol.TField("needMerge", org.apache.thrift.protocol.TType.BOOL, (short)2); private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)3); private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField ENGINE_FIELD_DESC = new org.apache.thrift.protocol.TField("engine", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -53,13 +54,15 @@ private boolean needMerge; // optional private long writeId; // optional private String validWriteIdList; // optional + private String engine; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { COL_STATS((short)1, "colStats"), NEED_MERGE((short)2, "needMerge"), WRITE_ID((short)3, "writeId"), - VALID_WRITE_ID_LIST((short)4, "validWriteIdList"); + VALID_WRITE_ID_LIST((short)4, "validWriteIdList"), + ENGINE((short)5, "engine"); private static final Map byName = new HashMap(); @@ -82,6 +85,8 @@ public static _Fields findByThriftId(int fieldId) { return WRITE_ID; case 4: // VALID_WRITE_ID_LIST return VALID_WRITE_ID_LIST; + case 5: // ENGINE + return ENGINE; default: return null; } @@ -138,6 +143,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.ENGINE, new org.apache.thrift.meta_data.FieldMetaData("engine", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SetPartitionsStatsRequest.class, metaDataMap); } @@ -148,10 +155,12 @@ public SetPartitionsStatsRequest() { } public SetPartitionsStatsRequest( - List colStats) + List colStats, + String engine) { this(); this.colStats = colStats; + this.engine = engine; } /** @@ -171,6 +180,9 @@ public SetPartitionsStatsRequest(SetPartitionsStatsRequest other) { if (other.isSetValidWriteIdList()) { this.validWriteIdList = other.validWriteIdList; } + if (other.isSetEngine()) { + this.engine = other.engine; + } } public SetPartitionsStatsRequest deepCopy() { @@ -185,6 +197,7 @@ public void clear() { this.writeId = -1L; this.validWriteIdList = null; + this.engine = null; } public int getColStatsSize() { @@ -292,6 +305,29 @@ public void setValidWriteIdListIsSet(boolean value) { } } + public String getEngine() { + return this.engine; + } + + public void setEngine(String engine) { + this.engine = engine; + } + + public void unsetEngine() { + this.engine = null; + } + + /** Returns true if field engine is set (has been assigned a value) and false otherwise */ + public boolean isSetEngine() { + return this.engine != null; + } + + public void setEngineIsSet(boolean 
value) { + if (!value) { + this.engine = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case COL_STATS: @@ -326,6 +362,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case ENGINE: + if (value == null) { + unsetEngine(); + } else { + setEngine((String)value); + } + break; + } } @@ -343,6 +387,9 @@ public Object getFieldValue(_Fields field) { case VALID_WRITE_ID_LIST: return getValidWriteIdList(); + case ENGINE: + return getEngine(); + } throw new IllegalStateException(); } @@ -362,6 +409,8 @@ public boolean isSet(_Fields field) { return isSetWriteId(); case VALID_WRITE_ID_LIST: return isSetValidWriteIdList(); + case ENGINE: + return isSetEngine(); } throw new IllegalStateException(); } @@ -415,6 +464,15 @@ public boolean equals(SetPartitionsStatsRequest that) { return false; } + boolean this_present_engine = true && this.isSetEngine(); + boolean that_present_engine = true && that.isSetEngine(); + if (this_present_engine || that_present_engine) { + if (!(this_present_engine && that_present_engine)) + return false; + if (!this.engine.equals(that.engine)) + return false; + } + return true; } @@ -442,6 +500,11 @@ public int hashCode() { if (present_validWriteIdList) list.add(validWriteIdList); + boolean present_engine = true && (isSetEngine()); + list.add(present_engine); + if (present_engine) + list.add(engine); + return list.hashCode(); } @@ -493,6 +556,16 @@ public int compareTo(SetPartitionsStatsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetEngine()).compareTo(other.isSetEngine()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEngine()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.engine, other.engine); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -542,6 +615,14 @@ public String toString() { } first = false; } + if (!first) sb.append(", "); + sb.append("engine:"); + if (this.engine == null) { + sb.append("null"); + } else { + sb.append(this.engine); + } + first = false; sb.append(")"); return sb.toString(); } @@ -552,6 +633,10 @@ public void validate() throws org.apache.thrift.TException { throw new org.apache.thrift.protocol.TProtocolException("Required field 'colStats' is unset! Struct:" + toString()); } + if (!isSetEngine()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'engine' is unset! 
Struct:" + toString()); + } + // check for sub-struct validity } @@ -634,6 +719,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SetPartitionsStatsR org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // ENGINE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.engine = iprot.readString(); + struct.setEngineIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -676,6 +769,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SetPartitionsStats oprot.writeFieldEnd(); } } + if (struct.engine != null) { + oprot.writeFieldBegin(ENGINE_FIELD_DESC); + oprot.writeString(struct.engine); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -700,6 +798,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsR _iter304.write(oprot); } } + oprot.writeString(struct.engine); BitSet optionals = new BitSet(); if (struct.isSetNeedMerge()) { optionals.set(0); @@ -737,6 +836,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsRe } } struct.setColStatsIsSet(true); + struct.engine = iprot.readString(); + struct.setEngineIsSet(true); BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.needMerge = iprot.readBool(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java index 220b852234..9b4b26dc16 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java @@ -43,6 +43,7 @@ private static final org.apache.thrift.protocol.TField COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("colNames", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)5); + private static final org.apache.thrift.protocol.TField ENGINE_FIELD_DESC = new org.apache.thrift.protocol.TField("engine", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -55,6 +56,7 @@ private List colNames; // required private String catName; // optional private String validWriteIdList; // optional + private String engine; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -62,7 +64,8 @@ TBL_NAME((short)2, "tblName"), COL_NAMES((short)3, "colNames"), CAT_NAME((short)4, "catName"), - VALID_WRITE_ID_LIST((short)5, "validWriteIdList"); + VALID_WRITE_ID_LIST((short)5, "validWriteIdList"), + ENGINE((short)6, "engine"); private static final Map byName = new HashMap(); @@ -87,6 +90,8 @@ public static _Fields findByThriftId(int fieldId) { return CAT_NAME; case 5: // VALID_WRITE_ID_LIST return VALID_WRITE_ID_LIST; + case 6: // ENGINE + return ENGINE; default: return null; } @@ -142,6 +147,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.ENGINE, new org.apache.thrift.meta_data.FieldMetaData("engine", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TableStatsRequest.class, metaDataMap); } @@ -152,12 +159,14 @@ public TableStatsRequest() { public TableStatsRequest( String dbName, String tblName, - List colNames) + List colNames, + String engine) { this(); this.dbName = dbName; this.tblName = tblName; this.colNames = colNames; + this.engine = engine; } /** @@ -180,6 +189,9 @@ public TableStatsRequest(TableStatsRequest other) { if (other.isSetValidWriteIdList()) { this.validWriteIdList = other.validWriteIdList; } + if (other.isSetEngine()) { + this.engine = other.engine; + } } public TableStatsRequest deepCopy() { @@ -193,6 +205,7 @@ public void clear() { this.colNames = null; this.catName = null; this.validWriteIdList = null; + this.engine = null; } public String getDbName() { @@ -325,6 +338,29 @@ public void setValidWriteIdListIsSet(boolean value) { } } + public String getEngine() { + return this.engine; + } + + public void setEngine(String engine) { + this.engine = engine; + } + + public void unsetEngine() { + this.engine = null; + } + + /** Returns true if field engine is set (has been assigned a value) and false otherwise */ + public boolean isSetEngine() { + return this.engine != null; + } + + public void setEngineIsSet(boolean value) { + if (!value) { + this.engine = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -367,6 +403,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case ENGINE: + if (value == null) { + unsetEngine(); + } else { + setEngine((String)value); + } + break; + } } @@ -387,6 +431,9 @@ public Object getFieldValue(_Fields field) { case VALID_WRITE_ID_LIST: return getValidWriteIdList(); + case ENGINE: + return getEngine(); + } throw new IllegalStateException(); } @@ -408,6 +455,8 @@ public boolean isSet(_Fields field) { return isSetCatName(); case VALID_WRITE_ID_LIST: return isSetValidWriteIdList(); + case ENGINE: + return isSetEngine(); } throw new IllegalStateException(); } @@ -470,6 +519,15 @@ public boolean equals(TableStatsRequest that) { return false; } + boolean this_present_engine = true && this.isSetEngine(); + boolean that_present_engine = true && that.isSetEngine(); + if (this_present_engine || that_present_engine) { + if 
(!(this_present_engine && that_present_engine)) + return false; + if (!this.engine.equals(that.engine)) + return false; + } + return true; } @@ -502,6 +560,11 @@ public int hashCode() { if (present_validWriteIdList) list.add(validWriteIdList); + boolean present_engine = true && (isSetEngine()); + list.add(present_engine); + if (present_engine) + list.add(engine); + return list.hashCode(); } @@ -563,6 +626,16 @@ public int compareTo(TableStatsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetEngine()).compareTo(other.isSetEngine()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEngine()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.engine, other.engine); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -626,6 +699,14 @@ public String toString() { } first = false; } + if (!first) sb.append(", "); + sb.append("engine:"); + if (this.engine == null) { + sb.append("null"); + } else { + sb.append(this.engine); + } + first = false; sb.append(")"); return sb.toString(); } @@ -644,6 +725,10 @@ public void validate() throws org.apache.thrift.TException { throw new org.apache.thrift.protocol.TProtocolException("Required field 'colNames' is unset! Struct:" + toString()); } + if (!isSetEngine()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'engine' is unset! Struct:" + toString()); + } + // check for sub-struct validity } @@ -731,6 +816,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsRequest s org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // ENGINE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.engine = iprot.readString(); + struct.setEngineIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -780,6 +873,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsRequest oprot.writeFieldEnd(); } } + if (struct.engine != null) { + oprot.writeFieldBegin(ENGINE_FIELD_DESC); + oprot.writeString(struct.engine); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -806,6 +904,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest s oprot.writeString(_iter470); } } + oprot.writeString(struct.engine); BitSet optionals = new BitSet(); if (struct.isSetCatName()) { optionals.set(0); @@ -840,6 +939,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest st } } struct.setColNamesIsSet(true); + struct.engine = iprot.readString(); + struct.setEngineIsSet(true); BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { struct.catName = iprot.readString(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index f4218ee042..ef0cce88a6 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -272,9 +272,9 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws NoSuchObjectException, 
InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException; - public boolean delete_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException; + public boolean delete_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException; - public boolean delete_table_column_statistics(String db_name, String tbl_name, String col_name) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException; + public boolean delete_table_column_statistics(String db_name, String tbl_name, String col_name, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException; public void create_function(Function func) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException; @@ -726,9 +726,9 @@ public void set_aggr_stats_for(SetPartitionsStatsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void delete_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void delete_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, String engine, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void delete_table_column_statistics(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void delete_table_column_statistics(String db_name, String tbl_name, String col_name, String engine, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void create_function(Function func, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -4455,19 +4455,20 @@ public boolean recv_set_aggr_stats_for() throws NoSuchObjectException, InvalidOb throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "set_aggr_stats_for failed: unknown result"); } - public boolean delete_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException + public boolean delete_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException { - send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name); + send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name, engine); return recv_delete_partition_column_statistics(); } - public void send_delete_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name) throws org.apache.thrift.TException + public void 
send_delete_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, String engine) throws org.apache.thrift.TException { delete_partition_column_statistics_args args = new delete_partition_column_statistics_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setPart_name(part_name); args.setCol_name(col_name); + args.setEngine(engine); sendBase("delete_partition_column_statistics", args); } @@ -4493,18 +4494,19 @@ public boolean recv_delete_partition_column_statistics() throws NoSuchObjectExce throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "delete_partition_column_statistics failed: unknown result"); } - public boolean delete_table_column_statistics(String db_name, String tbl_name, String col_name) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException + public boolean delete_table_column_statistics(String db_name, String tbl_name, String col_name, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException { - send_delete_table_column_statistics(db_name, tbl_name, col_name); + send_delete_table_column_statistics(db_name, tbl_name, col_name, engine); return recv_delete_table_column_statistics(); } - public void send_delete_table_column_statistics(String db_name, String tbl_name, String col_name) throws org.apache.thrift.TException + public void send_delete_table_column_statistics(String db_name, String tbl_name, String col_name, String engine) throws org.apache.thrift.TException { delete_table_column_statistics_args args = new delete_table_column_statistics_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setCol_name(col_name); + args.setEngine(engine); sendBase("delete_table_column_statistics", args); } @@ -11500,9 +11502,9 @@ public boolean getResult() throws NoSuchObjectException, InvalidObjectException, } } - public void delete_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void delete_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, String engine, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - delete_partition_column_statistics_call method_call = new delete_partition_column_statistics_call(db_name, tbl_name, part_name, col_name, resultHandler, this, ___protocolFactory, ___transport); + delete_partition_column_statistics_call method_call = new delete_partition_column_statistics_call(db_name, tbl_name, part_name, col_name, engine, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -11512,12 +11514,14 @@ public void delete_partition_column_statistics(String db_name, String tbl_name, private String tbl_name; private String part_name; private String col_name; - public delete_partition_column_statistics_call(String db_name, String tbl_name, String part_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String engine; + public 
delete_partition_column_statistics_call(String db_name, String tbl_name, String part_name, String col_name, String engine, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.part_name = part_name; this.col_name = col_name; + this.engine = engine; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -11527,6 +11531,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setTbl_name(tbl_name); args.setPart_name(part_name); args.setCol_name(col_name); + args.setEngine(engine); args.write(prot); prot.writeMessageEnd(); } @@ -11541,9 +11546,9 @@ public boolean getResult() throws NoSuchObjectException, MetaException, InvalidO } } - public void delete_table_column_statistics(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void delete_table_column_statistics(String db_name, String tbl_name, String col_name, String engine, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - delete_table_column_statistics_call method_call = new delete_table_column_statistics_call(db_name, tbl_name, col_name, resultHandler, this, ___protocolFactory, ___transport); + delete_table_column_statistics_call method_call = new delete_table_column_statistics_call(db_name, tbl_name, col_name, engine, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -11552,11 +11557,13 @@ public void delete_table_column_statistics(String db_name, String tbl_name, Stri private String db_name; private String tbl_name; private String col_name; - public delete_table_column_statistics_call(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String engine; + public delete_table_column_statistics_call(String db_name, String tbl_name, String col_name, String engine, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.col_name = col_name; + this.engine = engine; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -11565,6 +11572,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setCol_name(col_name); + args.setEngine(engine); args.write(prot); prot.writeMessageEnd(); } @@ -18422,7 +18430,7 @@ protected boolean isOneway() { public delete_partition_column_statistics_result getResult(I iface, delete_partition_column_statistics_args args) throws 
org.apache.thrift.TException { delete_partition_column_statistics_result result = new delete_partition_column_statistics_result(); try { - result.success = iface.delete_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name); + result.success = iface.delete_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name, args.engine); result.setSuccessIsSet(true); } catch (NoSuchObjectException o1) { result.o1 = o1; @@ -18453,7 +18461,7 @@ protected boolean isOneway() { public delete_table_column_statistics_result getResult(I iface, delete_table_column_statistics_args args) throws org.apache.thrift.TException { delete_table_column_statistics_result result = new delete_table_column_statistics_result(); try { - result.success = iface.delete_table_column_statistics(args.db_name, args.tbl_name, args.col_name); + result.success = iface.delete_table_column_statistics(args.db_name, args.tbl_name, args.col_name, args.engine); result.setSuccessIsSet(true); } catch (NoSuchObjectException o1) { result.o1 = o1; @@ -28675,7 +28683,7 @@ protected boolean isOneway() { } public void start(I iface, delete_partition_column_statistics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.delete_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name,resultHandler); + iface.delete_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name, args.engine,resultHandler); } } @@ -28748,7 +28756,7 @@ protected boolean isOneway() { } public void start(I iface, delete_table_column_statistics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.delete_table_column_statistics(args.db_name, args.tbl_name, args.col_name,resultHandler); + iface.delete_table_column_statistics(args.db_name, args.tbl_name, args.col_name, args.engine,resultHandler); } } @@ -160470,6 +160478,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_aggr_stats_for_r private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField PART_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("part_name", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField COL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("col_name", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField ENGINE_FIELD_DESC = new org.apache.thrift.protocol.TField("engine", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -160481,13 +160490,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_aggr_stats_for_r private String tbl_name; // required private String part_name; // required private String col_name; // required + private String engine; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), PART_NAME((short)3, "part_name"), - COL_NAME((short)4, "col_name"); + COL_NAME((short)4, "col_name"), + ENGINE((short)5, "engine"); private static final Map byName = new HashMap(); @@ -160510,6 +160521,8 @@ public static _Fields findByThriftId(int fieldId) { return PART_NAME; case 4: // COL_NAME return COL_NAME; + case 5: // ENGINE + return ENGINE; default: return null; } @@ -160561,6 +160574,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.COL_NAME, new org.apache.thrift.meta_data.FieldMetaData("col_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.ENGINE, new org.apache.thrift.meta_data.FieldMetaData("engine", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(delete_partition_column_statistics_args.class, metaDataMap); } @@ -160572,13 +160587,15 @@ public delete_partition_column_statistics_args( String db_name, String tbl_name, String part_name, - String col_name) + String col_name, + String engine) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.part_name = part_name; this.col_name = col_name; + this.engine = engine; } /** @@ -160597,6 +160614,9 @@ public delete_partition_column_statistics_args(delete_partition_column_statistic if (other.isSetCol_name()) { this.col_name = other.col_name; } + if (other.isSetEngine()) { + this.engine = other.engine; + } } public delete_partition_column_statistics_args deepCopy() { @@ -160609,6 +160629,7 @@ public void clear() { this.tbl_name = null; this.part_name = null; this.col_name = null; + this.engine = null; } public String getDb_name() { @@ -160703,6 +160724,29 @@ public void setCol_nameIsSet(boolean value) { } } + public String getEngine() { + return this.engine; + } + + public void setEngine(String engine) { + this.engine = engine; + } + + public void unsetEngine() { + this.engine = null; + } + + /** Returns true if field engine is set (has been assigned a value) and false otherwise */ + public boolean isSetEngine() { + return this.engine != null; + } + + public void setEngineIsSet(boolean value) { + if (!value) { + this.engine = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -160737,6 +160781,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case ENGINE: + if (value == null) { + unsetEngine(); + } else { + setEngine((String)value); + } + break; + } } @@ -160754,6 +160806,9 @@ public Object getFieldValue(_Fields field) { case COL_NAME: return getCol_name(); + case ENGINE: + return getEngine(); + } throw new IllegalStateException(); } @@ -160773,6 +160828,8 @@ public boolean isSet(_Fields field) { return isSetPart_name(); case COL_NAME: return isSetCol_name(); + case ENGINE: + return isSetEngine(); } throw new IllegalStateException(); } @@ -160826,6 +160883,15 @@ public boolean equals(delete_partition_column_statistics_args that) { return false; } + boolean this_present_engine = true && this.isSetEngine(); + boolean that_present_engine = true && that.isSetEngine(); + if (this_present_engine || 
that_present_engine) { + if (!(this_present_engine && that_present_engine)) + return false; + if (!this.engine.equals(that.engine)) + return false; + } + return true; } @@ -160853,6 +160919,11 @@ public int hashCode() { if (present_col_name) list.add(col_name); + boolean present_engine = true && (isSetEngine()); + list.add(present_engine); + if (present_engine) + list.add(engine); + return list.hashCode(); } @@ -160904,6 +160975,16 @@ public int compareTo(delete_partition_column_statistics_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetEngine()).compareTo(other.isSetEngine()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEngine()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.engine, other.engine); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -160955,6 +161036,14 @@ public String toString() { sb.append(this.col_name); } first = false; + if (!first) sb.append(", "); + sb.append("engine:"); + if (this.engine == null) { + sb.append("null"); + } else { + sb.append(this.engine); + } + first = false; sb.append(")"); return sb.toString(); } @@ -161030,6 +161119,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, delete_partition_co org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // ENGINE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.engine = iprot.readString(); + struct.setEngineIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -161063,6 +161160,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, delete_partition_c oprot.writeString(struct.col_name); oprot.writeFieldEnd(); } + if (struct.engine != null) { + oprot.writeFieldBegin(ENGINE_FIELD_DESC); + oprot.writeString(struct.engine); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -161093,7 +161195,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, delete_partition_co if (struct.isSetCol_name()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetEngine()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -161106,12 +161211,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, delete_partition_co if (struct.isSetCol_name()) { oprot.writeString(struct.col_name); } + if (struct.isSetEngine()) { + oprot.writeString(struct.engine); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, delete_partition_column_statistics_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -161128,6 +161236,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, delete_partition_col struct.col_name = iprot.readString(); struct.setCol_nameIsSet(true); } + if (incoming.get(4)) { + struct.engine = iprot.readString(); + struct.setEngineIsSet(true); + } } } @@ -161921,6 +162033,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, delete_partition_col private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", 
org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField COL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("col_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField ENGINE_FIELD_DESC = new org.apache.thrift.protocol.TField("engine", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -161931,12 +162044,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, delete_partition_col private String db_name; // required private String tbl_name; // required private String col_name; // required + private String engine; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - COL_NAME((short)3, "col_name"); + COL_NAME((short)3, "col_name"), + ENGINE((short)4, "engine"); private static final Map byName = new HashMap(); @@ -161957,6 +162072,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // COL_NAME return COL_NAME; + case 4: // ENGINE + return ENGINE; default: return null; } @@ -162006,6 +162123,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.COL_NAME, new org.apache.thrift.meta_data.FieldMetaData("col_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.ENGINE, new org.apache.thrift.meta_data.FieldMetaData("engine", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(delete_table_column_statistics_args.class, metaDataMap); } @@ -162016,12 +162135,14 @@ public delete_table_column_statistics_args() { public delete_table_column_statistics_args( String db_name, String tbl_name, - String col_name) + String col_name, + String engine) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.col_name = col_name; + this.engine = engine; } /** @@ -162037,6 +162158,9 @@ public delete_table_column_statistics_args(delete_table_column_statistics_args o if (other.isSetCol_name()) { this.col_name = other.col_name; } + if (other.isSetEngine()) { + this.engine = other.engine; + } } public delete_table_column_statistics_args deepCopy() { @@ -162048,6 +162172,7 @@ public void clear() { this.db_name = null; this.tbl_name = null; this.col_name = null; + this.engine = null; } public String getDb_name() { @@ -162119,6 +162244,29 @@ public void setCol_nameIsSet(boolean value) { } } + public String getEngine() { + return this.engine; + } + + public void setEngine(String engine) { + this.engine = engine; + } + + public void unsetEngine() { + this.engine = null; + } + + /** Returns true if field engine is set (has been assigned a value) and false otherwise */ + public boolean isSetEngine() { + return this.engine != null; + } + + public void setEngineIsSet(boolean value) { + 
if (!value) { + this.engine = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -162145,6 +162293,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case ENGINE: + if (value == null) { + unsetEngine(); + } else { + setEngine((String)value); + } + break; + } } @@ -162159,6 +162315,9 @@ public Object getFieldValue(_Fields field) { case COL_NAME: return getCol_name(); + case ENGINE: + return getEngine(); + } throw new IllegalStateException(); } @@ -162176,6 +162335,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case COL_NAME: return isSetCol_name(); + case ENGINE: + return isSetEngine(); } throw new IllegalStateException(); } @@ -162220,6 +162381,15 @@ public boolean equals(delete_table_column_statistics_args that) { return false; } + boolean this_present_engine = true && this.isSetEngine(); + boolean that_present_engine = true && that.isSetEngine(); + if (this_present_engine || that_present_engine) { + if (!(this_present_engine && that_present_engine)) + return false; + if (!this.engine.equals(that.engine)) + return false; + } + return true; } @@ -162242,6 +162412,11 @@ public int hashCode() { if (present_col_name) list.add(col_name); + boolean present_engine = true && (isSetEngine()); + list.add(present_engine); + if (present_engine) + list.add(engine); + return list.hashCode(); } @@ -162283,6 +162458,16 @@ public int compareTo(delete_table_column_statistics_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetEngine()).compareTo(other.isSetEngine()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEngine()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.engine, other.engine); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -162326,6 +162511,14 @@ public String toString() { sb.append(this.col_name); } first = false; + if (!first) sb.append(", "); + sb.append("engine:"); + if (this.engine == null) { + sb.append("null"); + } else { + sb.append(this.engine); + } + first = false; sb.append(")"); return sb.toString(); } @@ -162393,6 +162586,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, delete_table_column org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // ENGINE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.engine = iprot.readString(); + struct.setEngineIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -162421,6 +162622,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, delete_table_colum oprot.writeString(struct.col_name); oprot.writeFieldEnd(); } + if (struct.engine != null) { + oprot.writeFieldBegin(ENGINE_FIELD_DESC); + oprot.writeString(struct.engine); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -162448,7 +162654,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, delete_table_column if (struct.isSetCol_name()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetEngine()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -162458,12 +162667,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, delete_table_column if (struct.isSetCol_name()) { oprot.writeString(struct.col_name); 
} + if (struct.isSetEngine()) { + oprot.writeString(struct.engine); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, delete_table_column_statistics_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -162476,6 +162688,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, delete_table_column_ struct.col_name = iprot.readString(); struct.setCol_nameIsSet(true); } + if (incoming.get(3)) { + struct.engine = iprot.readString(); + struct.setEngineIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index e826dedc95..8d0542d8ee 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -974,24 +974,26 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @param string $tbl_name * @param string $part_name * @param string $col_name + * @param string $engine * @return bool * @throws \metastore\NoSuchObjectException * @throws \metastore\MetaException * @throws \metastore\InvalidObjectException * @throws \metastore\InvalidInputException */ - public function delete_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name); + public function delete_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name, $engine); /** * @param string $db_name * @param string $tbl_name * @param string $col_name + * @param string $engine * @return bool * @throws \metastore\NoSuchObjectException * @throws \metastore\MetaException * @throws \metastore\InvalidObjectException * @throws \metastore\InvalidInputException */ - public function delete_table_column_statistics($db_name, $tbl_name, $col_name); + public function delete_table_column_statistics($db_name, $tbl_name, $col_name, $engine); /** * @param \metastore\Function $func * @throws \metastore\AlreadyExistsException @@ -8382,19 +8384,20 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("set_aggr_stats_for failed: unknown result"); } - public function delete_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name) + public function delete_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name, $engine) { - $this->send_delete_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name); + $this->send_delete_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name, $engine); return $this->recv_delete_partition_column_statistics(); } - public function send_delete_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name) + public function send_delete_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name, $engine) { $args = new \metastore\ThriftHiveMetastore_delete_partition_column_statistics_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->part_name = $part_name; $args->col_name = $col_name; + $args->engine = $engine; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -8448,18 +8451,19 @@ class 
ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("delete_partition_column_statistics failed: unknown result"); } - public function delete_table_column_statistics($db_name, $tbl_name, $col_name) + public function delete_table_column_statistics($db_name, $tbl_name, $col_name, $engine) { - $this->send_delete_table_column_statistics($db_name, $tbl_name, $col_name); + $this->send_delete_table_column_statistics($db_name, $tbl_name, $col_name, $engine); return $this->recv_delete_table_column_statistics(); } - public function send_delete_table_column_statistics($db_name, $tbl_name, $col_name) + public function send_delete_table_column_statistics($db_name, $tbl_name, $col_name, $engine) { $args = new \metastore\ThriftHiveMetastore_delete_table_column_statistics_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->col_name = $col_name; + $args->engine = $engine; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -43001,6 +43005,10 @@ class ThriftHiveMetastore_delete_partition_column_statistics_args { * @var string */ public $col_name = null; + /** + * @var string + */ + public $engine = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -43021,6 +43029,10 @@ class ThriftHiveMetastore_delete_partition_column_statistics_args { 'var' => 'col_name', 'type' => TType::STRING, ), + 5 => array( + 'var' => 'engine', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -43036,6 +43048,9 @@ class ThriftHiveMetastore_delete_partition_column_statistics_args { if (isset($vals['col_name'])) { $this->col_name = $vals['col_name']; } + if (isset($vals['engine'])) { + $this->engine = $vals['engine']; + } } } @@ -43086,6 +43101,13 @@ class ThriftHiveMetastore_delete_partition_column_statistics_args { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->engine); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -43119,6 +43141,11 @@ class ThriftHiveMetastore_delete_partition_column_statistics_args { $xfer += $output->writeString($this->col_name); $xfer += $output->writeFieldEnd(); } + if ($this->engine !== null) { + $xfer += $output->writeFieldBegin('engine', TType::STRING, 5); + $xfer += $output->writeString($this->engine); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -43316,6 +43343,10 @@ class ThriftHiveMetastore_delete_table_column_statistics_args { * @var string */ public $col_name = null; + /** + * @var string + */ + public $engine = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -43332,6 +43363,10 @@ class ThriftHiveMetastore_delete_table_column_statistics_args { 'var' => 'col_name', 'type' => TType::STRING, ), + 4 => array( + 'var' => 'engine', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -43344,6 +43379,9 @@ class ThriftHiveMetastore_delete_table_column_statistics_args { if (isset($vals['col_name'])) { $this->col_name = $vals['col_name']; } + if (isset($vals['engine'])) { + $this->engine = $vals['engine']; + } } } @@ -43387,6 +43425,13 @@ class ThriftHiveMetastore_delete_table_column_statistics_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->engine); + } else { + $xfer += 
$input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -43415,6 +43460,11 @@ class ThriftHiveMetastore_delete_table_column_statistics_args { $xfer += $output->writeString($this->col_name); $xfer += $output->writeFieldEnd(); } + if ($this->engine !== null) { + $xfer += $output->writeFieldBegin('engine', TType::STRING, 4); + $xfer += $output->writeString($this->engine); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php index a09c1d540c..911c44d387 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php @@ -10337,6 +10337,10 @@ class ColumnStatistics { * @var bool */ public $isStatsCompliant = null; + /** + * @var string + */ + public $engine = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -10359,6 +10363,10 @@ class ColumnStatistics { 'var' => 'isStatsCompliant', 'type' => TType::BOOL, ), + 4 => array( + 'var' => 'engine', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -10371,6 +10379,9 @@ class ColumnStatistics { if (isset($vals['isStatsCompliant'])) { $this->isStatsCompliant = $vals['isStatsCompliant']; } + if (isset($vals['engine'])) { + $this->engine = $vals['engine']; + } } } @@ -10426,6 +10437,13 @@ class ColumnStatistics { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->engine); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -10469,6 +10487,11 @@ class ColumnStatistics { $xfer += $output->writeBool($this->isStatsCompliant); $xfer += $output->writeFieldEnd(); } + if ($this->engine !== null) { + $xfer += $output->writeFieldBegin('engine', TType::STRING, 4); + $xfer += $output->writeString($this->engine); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -10993,6 +11016,10 @@ class SetPartitionsStatsRequest { * @var string */ public $validWriteIdList = null; + /** + * @var string + */ + public $engine = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -11018,6 +11045,10 @@ class SetPartitionsStatsRequest { 'var' => 'validWriteIdList', 'type' => TType::STRING, ), + 5 => array( + 'var' => 'engine', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -11033,6 +11064,9 @@ class SetPartitionsStatsRequest { if (isset($vals['validWriteIdList'])) { $this->validWriteIdList = $vals['validWriteIdList']; } + if (isset($vals['engine'])) { + $this->engine = $vals['engine']; + } } } @@ -11094,6 +11128,13 @@ class SetPartitionsStatsRequest { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->engine); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -11139,6 +11180,11 @@ class SetPartitionsStatsRequest { $xfer += $output->writeString($this->validWriteIdList); $xfer += $output->writeFieldEnd(); } + if ($this->engine !== null) { + $xfer += $output->writeFieldBegin('engine', TType::STRING, 5); + $xfer += $output->writeString($this->engine); + $xfer += $output->writeFieldEnd(); + } 
$xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -14264,6 +14310,10 @@ class TableStatsRequest { * @var string */ public $validWriteIdList = null; + /** + * @var string + */ + public $engine = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -14292,6 +14342,10 @@ class TableStatsRequest { 'var' => 'validWriteIdList', 'type' => TType::STRING, ), + 6 => array( + 'var' => 'engine', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -14310,6 +14364,9 @@ class TableStatsRequest { if (isset($vals['validWriteIdList'])) { $this->validWriteIdList = $vals['validWriteIdList']; } + if (isset($vals['engine'])) { + $this->engine = $vals['engine']; + } } } @@ -14377,6 +14434,13 @@ class TableStatsRequest { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->engine); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -14427,6 +14491,11 @@ class TableStatsRequest { $xfer += $output->writeString($this->validWriteIdList); $xfer += $output->writeFieldEnd(); } + if ($this->engine !== null) { + $xfer += $output->writeFieldBegin('engine', TType::STRING, 6); + $xfer += $output->writeString($this->engine); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -14461,6 +14530,10 @@ class PartitionsStatsRequest { * @var string */ public $validWriteIdList = null; + /** + * @var string + */ + public $engine = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -14497,6 +14570,10 @@ class PartitionsStatsRequest { 'var' => 'validWriteIdList', 'type' => TType::STRING, ), + 7 => array( + 'var' => 'engine', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -14518,6 +14595,9 @@ class PartitionsStatsRequest { if (isset($vals['validWriteIdList'])) { $this->validWriteIdList = $vals['validWriteIdList']; } + if (isset($vals['engine'])) { + $this->engine = $vals['engine']; + } } } @@ -14602,6 +14682,13 @@ class PartitionsStatsRequest { $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->engine); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -14669,6 +14756,11 @@ class PartitionsStatsRequest { $xfer += $output->writeString($this->validWriteIdList); $xfer += $output->writeFieldEnd(); } + if ($this->engine !== null) { + $xfer += $output->writeFieldBegin('engine', TType::STRING, 7); + $xfer += $output->writeString($this->engine); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -16211,6 +16303,10 @@ class GetPartitionsByNamesRequest { * @var string */ public $processorIdentifier = null; + /** + * @var string + */ + public $engine = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -16247,6 +16343,10 @@ class GetPartitionsByNamesRequest { 'var' => 'processorIdentifier', 'type' => TType::STRING, ), + 7 => array( + 'var' => 'engine', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -16268,6 +16368,9 @@ class GetPartitionsByNamesRequest { if (isset($vals['processorIdentifier'])) { $this->processorIdentifier = $vals['processorIdentifier']; } + if (isset($vals['engine'])) { + $this->engine = $vals['engine']; + } } } @@ -16352,6 +16455,13 @@ class 
GetPartitionsByNamesRequest { $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->engine); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -16419,6 +16529,11 @@ class GetPartitionsByNamesRequest { $xfer += $output->writeString($this->processorIdentifier); $xfer += $output->writeFieldEnd(); } + if ($this->engine !== null) { + $xfer += $output->writeFieldBegin('engine', TType::STRING, 7); + $xfer += $output->writeString($this->engine); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -26352,6 +26467,10 @@ class GetTableRequest { * @var string */ public $processorIdentifier = null; + /** + * @var string + */ + public $engine = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -26393,6 +26512,10 @@ class GetTableRequest { 'var' => 'processorIdentifier', 'type' => TType::STRING, ), + 10 => array( + 'var' => 'engine', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -26420,6 +26543,9 @@ class GetTableRequest { if (isset($vals['processorIdentifier'])) { $this->processorIdentifier = $vals['processorIdentifier']; } + if (isset($vals['engine'])) { + $this->engine = $vals['engine']; + } } } @@ -26509,6 +26635,13 @@ class GetTableRequest { $xfer += $input->skip($ftype); } break; + case 10: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->engine); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -26577,6 +26710,11 @@ class GetTableRequest { $xfer += $output->writeString($this->processorIdentifier); $xfer += $output->writeFieldEnd(); } + if ($this->engine !== null) { + $xfer += $output->writeFieldBegin('engine', TType::STRING, 10); + $xfer += $output->writeString($this->engine); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index 16add782ce..527aba383b 100755 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -139,8 +139,8 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsRequest request)') print(' AggrStats get_aggr_stats_for(PartitionsStatsRequest request)') print(' bool set_aggr_stats_for(SetPartitionsStatsRequest request)') - print(' bool delete_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name)') - print(' bool delete_table_column_statistics(string db_name, string tbl_name, string col_name)') + print(' bool delete_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name, string engine)') + print(' bool delete_table_column_statistics(string db_name, string tbl_name, string col_name, string engine)') print(' void create_function(Function func)') print(' void drop_function(string dbName, string funcName)') print(' void alter_function(string dbName, string funcName, Function newFunc)') @@ -1009,16 +1009,16 @@ elif cmd == 'set_aggr_stats_for': 
pp.pprint(client.set_aggr_stats_for(eval(args[0]),)) elif cmd == 'delete_partition_column_statistics': - if len(args) != 4: - print('delete_partition_column_statistics requires 4 args') + if len(args) != 5: + print('delete_partition_column_statistics requires 5 args') sys.exit(1) - pp.pprint(client.delete_partition_column_statistics(args[0],args[1],args[2],args[3],)) + pp.pprint(client.delete_partition_column_statistics(args[0],args[1],args[2],args[3],args[4],)) elif cmd == 'delete_table_column_statistics': - if len(args) != 3: - print('delete_table_column_statistics requires 3 args') + if len(args) != 4: + print('delete_table_column_statistics requires 4 args') sys.exit(1) - pp.pprint(client.delete_table_column_statistics(args[0],args[1],args[2],)) + pp.pprint(client.delete_table_column_statistics(args[0],args[1],args[2],args[3],)) elif cmd == 'create_function': if len(args) != 1: diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index 540e89356c..7f57a0fa63 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -959,22 +959,24 @@ def set_aggr_stats_for(self, request): """ pass - def delete_partition_column_statistics(self, db_name, tbl_name, part_name, col_name): + def delete_partition_column_statistics(self, db_name, tbl_name, part_name, col_name, engine): """ Parameters: - db_name - tbl_name - part_name - col_name + - engine """ pass - def delete_table_column_statistics(self, db_name, tbl_name, col_name): + def delete_table_column_statistics(self, db_name, tbl_name, col_name, engine): """ Parameters: - db_name - tbl_name - col_name + - engine """ pass @@ -6039,24 +6041,26 @@ def recv_set_aggr_stats_for(self): raise result.o4 raise TApplicationException(TApplicationException.MISSING_RESULT, "set_aggr_stats_for failed: unknown result") - def delete_partition_column_statistics(self, db_name, tbl_name, part_name, col_name): + def delete_partition_column_statistics(self, db_name, tbl_name, part_name, col_name, engine): """ Parameters: - db_name - tbl_name - part_name - col_name + - engine """ - self.send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name) + self.send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name, engine) return self.recv_delete_partition_column_statistics() - def send_delete_partition_column_statistics(self, db_name, tbl_name, part_name, col_name): + def send_delete_partition_column_statistics(self, db_name, tbl_name, part_name, col_name, engine): self._oprot.writeMessageBegin('delete_partition_column_statistics', TMessageType.CALL, self._seqid) args = delete_partition_column_statistics_args() args.db_name = db_name args.tbl_name = tbl_name args.part_name = part_name args.col_name = col_name + args.engine = engine args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -6084,22 +6088,24 @@ def recv_delete_partition_column_statistics(self): raise result.o4 raise TApplicationException(TApplicationException.MISSING_RESULT, "delete_partition_column_statistics failed: unknown result") - def delete_table_column_statistics(self, db_name, tbl_name, col_name): + def delete_table_column_statistics(self, db_name, tbl_name, col_name, engine): """ Parameters: - db_name - tbl_name - col_name + 
- engine """ - self.send_delete_table_column_statistics(db_name, tbl_name, col_name) + self.send_delete_table_column_statistics(db_name, tbl_name, col_name, engine) return self.recv_delete_table_column_statistics() - def send_delete_table_column_statistics(self, db_name, tbl_name, col_name): + def send_delete_table_column_statistics(self, db_name, tbl_name, col_name, engine): self._oprot.writeMessageBegin('delete_table_column_statistics', TMessageType.CALL, self._seqid) args = delete_table_column_statistics_args() args.db_name = db_name args.tbl_name = tbl_name args.col_name = col_name + args.engine = engine args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -12948,7 +12954,7 @@ def process_delete_partition_column_statistics(self, seqid, iprot, oprot): iprot.readMessageEnd() result = delete_partition_column_statistics_result() try: - result.success = self._handler.delete_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name) + result.success = self._handler.delete_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name, args.engine) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -12979,7 +12985,7 @@ def process_delete_table_column_statistics(self, seqid, iprot, oprot): iprot.readMessageEnd() result = delete_table_column_statistics_result() try: - result.success = self._handler.delete_table_column_statistics(args.db_name, args.tbl_name, args.col_name) + result.success = self._handler.delete_table_column_statistics(args.db_name, args.tbl_name, args.col_name, args.engine) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -36104,6 +36110,7 @@ class delete_partition_column_statistics_args: - tbl_name - part_name - col_name + - engine """ thrift_spec = ( @@ -36112,13 +36119,15 @@ class delete_partition_column_statistics_args: (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.STRING, 'part_name', None, None, ), # 3 (4, TType.STRING, 'col_name', None, None, ), # 4 + (5, TType.STRING, 'engine', None, None, ), # 5 ) - def __init__(self, db_name=None, tbl_name=None, part_name=None, col_name=None,): + def __init__(self, db_name=None, tbl_name=None, part_name=None, col_name=None, engine=None,): self.db_name = db_name self.tbl_name = tbl_name self.part_name = part_name self.col_name = col_name + self.engine = engine def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -36149,6 +36158,11 @@ def read(self, iprot): self.col_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.engine = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -36175,6 +36189,10 @@ def write(self, oprot): oprot.writeFieldBegin('col_name', TType.STRING, 4) oprot.writeString(self.col_name) oprot.writeFieldEnd() + if self.engine is not None: + oprot.writeFieldBegin('engine', TType.STRING, 5) + oprot.writeString(self.engine) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -36188,6 +36206,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.part_name) value = (value * 31) ^ hash(self.col_name) + value = (value * 31) ^ hash(self.engine) return value def __repr__(self): @@ -36327,6 +36346,7 @@ 
class delete_table_column_statistics_args: - db_name - tbl_name - col_name + - engine """ thrift_spec = ( @@ -36334,12 +36354,14 @@ class delete_table_column_statistics_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.STRING, 'col_name', None, None, ), # 3 + (4, TType.STRING, 'engine', None, None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, col_name=None,): + def __init__(self, db_name=None, tbl_name=None, col_name=None, engine=None,): self.db_name = db_name self.tbl_name = tbl_name self.col_name = col_name + self.engine = engine def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -36365,6 +36387,11 @@ def read(self, iprot): self.col_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.engine = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -36387,6 +36414,10 @@ def write(self, oprot): oprot.writeFieldBegin('col_name', TType.STRING, 3) oprot.writeString(self.col_name) oprot.writeFieldEnd() + if self.engine is not None: + oprot.writeFieldBegin('engine', TType.STRING, 4) + oprot.writeString(self.engine) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -36399,6 +36430,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.col_name) + value = (value * 31) ^ hash(self.engine) return value def __repr__(self): diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py index d6a08bbe32..cbdd76b38c 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -7143,6 +7143,7 @@ class ColumnStatistics: - statsDesc - statsObj - isStatsCompliant + - engine """ thrift_spec = ( @@ -7150,12 +7151,14 @@ class ColumnStatistics: (1, TType.STRUCT, 'statsDesc', (ColumnStatisticsDesc, ColumnStatisticsDesc.thrift_spec), None, ), # 1 (2, TType.LIST, 'statsObj', (TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec)), None, ), # 2 (3, TType.BOOL, 'isStatsCompliant', None, None, ), # 3 + (4, TType.STRING, 'engine', None, None, ), # 4 ) - def __init__(self, statsDesc=None, statsObj=None, isStatsCompliant=None,): + def __init__(self, statsDesc=None, statsObj=None, isStatsCompliant=None, engine=None,): self.statsDesc = statsDesc self.statsObj = statsObj self.isStatsCompliant = isStatsCompliant + self.engine = engine def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -7188,6 +7191,11 @@ def read(self, iprot): self.isStatsCompliant = iprot.readBool() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.engine = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -7213,6 +7221,10 @@ def write(self, oprot): oprot.writeFieldBegin('isStatsCompliant', TType.BOOL, 3) oprot.writeBool(self.isStatsCompliant) oprot.writeFieldEnd() + if self.engine is not None: + oprot.writeFieldBegin('engine', 
TType.STRING, 4) + oprot.writeString(self.engine) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -7221,6 +7233,8 @@ def validate(self): raise TProtocol.TProtocolException(message='Required field statsDesc is unset!') if self.statsObj is None: raise TProtocol.TProtocolException(message='Required field statsObj is unset!') + if self.engine is None: + raise TProtocol.TProtocolException(message='Required field engine is unset!') return @@ -7229,6 +7243,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.statsDesc) value = (value * 31) ^ hash(self.statsObj) value = (value * 31) ^ hash(self.isStatsCompliant) + value = (value * 31) ^ hash(self.engine) return value def __repr__(self): @@ -7585,6 +7600,7 @@ class SetPartitionsStatsRequest: - needMerge - writeId - validWriteIdList + - engine """ thrift_spec = ( @@ -7593,13 +7609,15 @@ class SetPartitionsStatsRequest: (2, TType.BOOL, 'needMerge', None, None, ), # 2 (3, TType.I64, 'writeId', None, -1, ), # 3 (4, TType.STRING, 'validWriteIdList', None, None, ), # 4 + (5, TType.STRING, 'engine', None, None, ), # 5 ) - def __init__(self, colStats=None, needMerge=None, writeId=thrift_spec[3][4], validWriteIdList=None,): + def __init__(self, colStats=None, needMerge=None, writeId=thrift_spec[3][4], validWriteIdList=None, engine=None,): self.colStats = colStats self.needMerge = needMerge self.writeId = writeId self.validWriteIdList = validWriteIdList + self.engine = engine def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -7636,6 +7654,11 @@ def read(self, iprot): self.validWriteIdList = iprot.readString() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.engine = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -7665,12 +7688,18 @@ def write(self, oprot): oprot.writeFieldBegin('validWriteIdList', TType.STRING, 4) oprot.writeString(self.validWriteIdList) oprot.writeFieldEnd() + if self.engine is not None: + oprot.writeFieldBegin('engine', TType.STRING, 5) + oprot.writeString(self.engine) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.colStats is None: raise TProtocol.TProtocolException(message='Required field colStats is unset!') + if self.engine is None: + raise TProtocol.TProtocolException(message='Required field engine is unset!') return @@ -7680,6 +7709,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.needMerge) value = (value * 31) ^ hash(self.writeId) value = (value * 31) ^ hash(self.validWriteIdList) + value = (value * 31) ^ hash(self.engine) return value def __repr__(self): @@ -9977,6 +10007,7 @@ class TableStatsRequest: - colNames - catName - validWriteIdList + - engine """ thrift_spec = ( @@ -9986,14 +10017,16 @@ class TableStatsRequest: (3, TType.LIST, 'colNames', (TType.STRING,None), None, ), # 3 (4, TType.STRING, 'catName', None, None, ), # 4 (5, TType.STRING, 'validWriteIdList', None, None, ), # 5 + (6, TType.STRING, 'engine', None, None, ), # 6 ) - def __init__(self, dbName=None, tblName=None, colNames=None, catName=None, validWriteIdList=None,): + def __init__(self, dbName=None, tblName=None, colNames=None, catName=None, validWriteIdList=None, engine=None,): self.dbName = dbName self.tblName = tblName self.colNames = colNames self.catName = catName self.validWriteIdList = validWriteIdList + self.engine = engine def 
read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -10034,6 +10067,11 @@ def read(self, iprot): self.validWriteIdList = iprot.readString() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.engine = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -10067,6 +10105,10 @@ def write(self, oprot): oprot.writeFieldBegin('validWriteIdList', TType.STRING, 5) oprot.writeString(self.validWriteIdList) oprot.writeFieldEnd() + if self.engine is not None: + oprot.writeFieldBegin('engine', TType.STRING, 6) + oprot.writeString(self.engine) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -10077,6 +10119,8 @@ def validate(self): raise TProtocol.TProtocolException(message='Required field tblName is unset!') if self.colNames is None: raise TProtocol.TProtocolException(message='Required field colNames is unset!') + if self.engine is None: + raise TProtocol.TProtocolException(message='Required field engine is unset!') return @@ -10087,6 +10131,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.colNames) value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.validWriteIdList) + value = (value * 31) ^ hash(self.engine) return value def __repr__(self): @@ -10109,6 +10154,7 @@ class PartitionsStatsRequest: - partNames - catName - validWriteIdList + - engine """ thrift_spec = ( @@ -10119,15 +10165,17 @@ class PartitionsStatsRequest: (4, TType.LIST, 'partNames', (TType.STRING,None), None, ), # 4 (5, TType.STRING, 'catName', None, None, ), # 5 (6, TType.STRING, 'validWriteIdList', None, None, ), # 6 + (7, TType.STRING, 'engine', None, None, ), # 7 ) - def __init__(self, dbName=None, tblName=None, colNames=None, partNames=None, catName=None, validWriteIdList=None,): + def __init__(self, dbName=None, tblName=None, colNames=None, partNames=None, catName=None, validWriteIdList=None, engine=None,): self.dbName = dbName self.tblName = tblName self.colNames = colNames self.partNames = partNames self.catName = catName self.validWriteIdList = validWriteIdList + self.engine = engine def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -10178,6 +10226,11 @@ def read(self, iprot): self.validWriteIdList = iprot.readString() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.engine = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -10218,6 +10271,10 @@ def write(self, oprot): oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6) oprot.writeString(self.validWriteIdList) oprot.writeFieldEnd() + if self.engine is not None: + oprot.writeFieldBegin('engine', TType.STRING, 7) + oprot.writeString(self.engine) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -10230,6 +10287,8 @@ def validate(self): raise TProtocol.TProtocolException(message='Required field colNames is unset!') if self.partNames is None: raise TProtocol.TProtocolException(message='Required field partNames is unset!') + if self.engine is None: + raise TProtocol.TProtocolException(message='Required field engine is unset!') return @@ -10241,6 +10300,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.partNames) value = (value * 31) ^ 
hash(self.catName) value = (value * 31) ^ hash(self.validWriteIdList) + value = (value * 31) ^ hash(self.engine) return value def __repr__(self): @@ -11280,6 +11340,7 @@ class GetPartitionsByNamesRequest: - get_col_stats - processorCapabilities - processorIdentifier + - engine """ thrift_spec = ( @@ -11290,15 +11351,17 @@ class GetPartitionsByNamesRequest: (4, TType.BOOL, 'get_col_stats', None, None, ), # 4 (5, TType.LIST, 'processorCapabilities', (TType.STRING,None), None, ), # 5 (6, TType.STRING, 'processorIdentifier', None, None, ), # 6 + (7, TType.STRING, 'engine', None, None, ), # 7 ) - def __init__(self, db_name=None, tbl_name=None, names=None, get_col_stats=None, processorCapabilities=None, processorIdentifier=None,): + def __init__(self, db_name=None, tbl_name=None, names=None, get_col_stats=None, processorCapabilities=None, processorIdentifier=None, engine=None,): self.db_name = db_name self.tbl_name = tbl_name self.names = names self.get_col_stats = get_col_stats self.processorCapabilities = processorCapabilities self.processorIdentifier = processorIdentifier + self.engine = engine def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -11349,6 +11412,11 @@ def read(self, iprot): self.processorIdentifier = iprot.readString() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.engine = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -11389,6 +11457,10 @@ def write(self, oprot): oprot.writeFieldBegin('processorIdentifier', TType.STRING, 6) oprot.writeString(self.processorIdentifier) oprot.writeFieldEnd() + if self.engine is not None: + oprot.writeFieldBegin('engine', TType.STRING, 7) + oprot.writeString(self.engine) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -11408,6 +11480,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.get_col_stats) value = (value * 31) ^ hash(self.processorCapabilities) value = (value * 31) ^ hash(self.processorIdentifier) + value = (value * 31) ^ hash(self.engine) return value def __repr__(self): @@ -18432,6 +18505,7 @@ class GetTableRequest: - getColumnStats - processorCapabilities - processorIdentifier + - engine """ thrift_spec = ( @@ -18445,9 +18519,10 @@ class GetTableRequest: (7, TType.BOOL, 'getColumnStats', None, None, ), # 7 (8, TType.LIST, 'processorCapabilities', (TType.STRING,None), None, ), # 8 (9, TType.STRING, 'processorIdentifier', None, None, ), # 9 + (10, TType.STRING, 'engine', None, None, ), # 10 ) - def __init__(self, dbName=None, tblName=None, capabilities=None, catName=None, validWriteIdList=None, getColumnStats=None, processorCapabilities=None, processorIdentifier=None,): + def __init__(self, dbName=None, tblName=None, capabilities=None, catName=None, validWriteIdList=None, getColumnStats=None, processorCapabilities=None, processorIdentifier=None, engine=None,): self.dbName = dbName self.tblName = tblName self.capabilities = capabilities @@ -18456,6 +18531,7 @@ def __init__(self, dbName=None, tblName=None, capabilities=None, catName=None, v self.getColumnStats = getColumnStats self.processorCapabilities = processorCapabilities self.processorIdentifier = processorIdentifier + self.engine = engine def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None 
and fastbinary is not None: @@ -18512,6 +18588,11 @@ def read(self, iprot): self.processorIdentifier = iprot.readString() else: iprot.skip(ftype) + elif fid == 10: + if ftype == TType.STRING: + self.engine = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -18557,6 +18638,10 @@ def write(self, oprot): oprot.writeFieldBegin('processorIdentifier', TType.STRING, 9) oprot.writeString(self.processorIdentifier) oprot.writeFieldEnd() + if self.engine is not None: + oprot.writeFieldBegin('engine', TType.STRING, 10) + oprot.writeString(self.engine) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -18578,6 +18663,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.getColumnStats) value = (value * 31) ^ hash(self.processorCapabilities) value = (value * 31) ^ hash(self.processorIdentifier) + value = (value * 31) ^ hash(self.engine) return value def __repr__(self): diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb index e7a121a424..5eedc32886 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -1622,11 +1622,13 @@ class ColumnStatistics STATSDESC = 1 STATSOBJ = 2 ISSTATSCOMPLIANT = 3 + ENGINE = 4 FIELDS = { STATSDESC => {:type => ::Thrift::Types::STRUCT, :name => 'statsDesc', :class => ::ColumnStatisticsDesc}, STATSOBJ => {:type => ::Thrift::Types::LIST, :name => 'statsObj', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}, - ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true} + ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true}, + ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine'} } def struct_fields; FIELDS; end @@ -1634,6 +1636,7 @@ class ColumnStatistics def validate raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field statsDesc is unset!') unless @statsDesc raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field statsObj is unset!') unless @statsObj + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field engine is unset!') unless @engine end ::Thrift::Struct.generate_accessors self @@ -1713,18 +1716,21 @@ class SetPartitionsStatsRequest NEEDMERGE = 2 WRITEID = 3 VALIDWRITEIDLIST = 4 + ENGINE = 5 FIELDS = { COLSTATS => {:type => ::Thrift::Types::LIST, :name => 'colStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatistics}}, NEEDMERGE => {:type => ::Thrift::Types::BOOL, :name => 'needMerge', :optional => true}, WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true}, - VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true} + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}, + ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine'} } def struct_fields; FIELDS; end def validate raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field colStats is unset!') unless @colStats + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field engine is unset!') unless @engine end 
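The generated Python and Ruby follow one pattern throughout: engine is appended as the last field of each stats struct, serialized only when non-null, and rejected by validate() wherever the Thrift definition marks it required (ColumnStatistics, SetPartitionsStatsRequest, TableStatsRequest, PartitionsStatsRequest). A minimal Java sketch of what a caller must now supply; the "hive" engine string and the database/table/column/partition names are illustrative, not taken from the patch:

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;
import org.apache.hadoop.hive.metastore.api.TableStatsRequest;

public class EngineAwareStatsRequests {

  // TableStatsRequest: engine is required (field 6), so it joins the
  // required-field constructor; catName (4) and validWriteIdList (5)
  // remain optional and may stay unset.
  static TableStatsRequest tableRequest() {
    return new TableStatsRequest("default", "t1", Arrays.asList("c1", "c2"), "hive");
  }

  // PartitionsStatsRequest: engine is required (field 7).
  static PartitionsStatsRequest partitionsRequest() {
    PartitionsStatsRequest rqst = new PartitionsStatsRequest(
        "default", "t1", Arrays.asList("c1"),
        Arrays.asList("ds=2020-01-01"), "hive");
    rqst.setCatName("hive"); // optional catalog name, field 5
    return rqst;
  }

  // ColumnStatistics: engine is required (field 4); a struct without it
  // now fails validate() in every generated binding.
  static ColumnStatistics tagWithEngine(ColumnStatistics cs) {
    cs.setEngine("hive");
    return cs;
  }
}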
::Thrift::Struct.generate_accessors self @@ -2242,13 +2248,15 @@ class TableStatsRequest COLNAMES = 3 CATNAME = 4 VALIDWRITEIDLIST = 5 + ENGINE = 6 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}, COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}}, CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, - VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true} + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}, + ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine'} } def struct_fields; FIELDS; end @@ -2257,6 +2265,7 @@ class TableStatsRequest raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field colNames is unset!') unless @colNames + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field engine is unset!') unless @engine end ::Thrift::Struct.generate_accessors self @@ -2270,6 +2279,7 @@ class PartitionsStatsRequest PARTNAMES = 4 CATNAME = 5 VALIDWRITEIDLIST = 6 + ENGINE = 7 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, @@ -2277,7 +2287,8 @@ class PartitionsStatsRequest COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}}, PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}}, CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, - VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true} + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}, + ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine'} } def struct_fields; FIELDS; end @@ -2287,6 +2298,7 @@ class PartitionsStatsRequest raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field colNames is unset!') unless @colNames raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partNames is unset!') unless @partNames + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field engine is unset!') unless @engine end ::Thrift::Struct.generate_accessors self @@ -2518,6 +2530,7 @@ class GetPartitionsByNamesRequest GET_COL_STATS = 4 PROCESSORCAPABILITIES = 5 PROCESSORIDENTIFIER = 6 + ENGINE = 7 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, @@ -2525,7 +2538,8 @@ class GetPartitionsByNamesRequest NAMES => {:type => ::Thrift::Types::LIST, :name => 'names', :element => {:type => ::Thrift::Types::STRING}, :optional => true}, GET_COL_STATS => {:type => ::Thrift::Types::BOOL, :name => 'get_col_stats', :optional => true}, PROCESSORCAPABILITIES => {:type => ::Thrift::Types::LIST, :name => 'processorCapabilities', :element => {:type => ::Thrift::Types::STRING}, :optional => true}, - 
PROCESSORIDENTIFIER => {:type => ::Thrift::Types::STRING, :name => 'processorIdentifier', :optional => true} + PROCESSORIDENTIFIER => {:type => ::Thrift::Types::STRING, :name => 'processorIdentifier', :optional => true}, + ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', :optional => true} } def struct_fields; FIELDS; end @@ -4117,6 +4131,7 @@ class GetTableRequest GETCOLUMNSTATS = 7 PROCESSORCAPABILITIES = 8 PROCESSORIDENTIFIER = 9 + ENGINE = 10 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, @@ -4126,7 +4141,8 @@ class GetTableRequest VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}, GETCOLUMNSTATS => {:type => ::Thrift::Types::BOOL, :name => 'getColumnStats', :optional => true}, PROCESSORCAPABILITIES => {:type => ::Thrift::Types::LIST, :name => 'processorCapabilities', :element => {:type => ::Thrift::Types::STRING}, :optional => true}, - PROCESSORIDENTIFIER => {:type => ::Thrift::Types::STRING, :name => 'processorIdentifier', :optional => true} + PROCESSORIDENTIFIER => {:type => ::Thrift::Types::STRING, :name => 'processorIdentifier', :optional => true}, + ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', :optional => true} } def struct_fields; FIELDS; end diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index b8b725bbac..831aba6cdb 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -1971,13 +1971,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'set_aggr_stats_for failed: unknown result') end - def delete_partition_column_statistics(db_name, tbl_name, part_name, col_name) - send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name) + def delete_partition_column_statistics(db_name, tbl_name, part_name, col_name, engine) + send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name, engine) return recv_delete_partition_column_statistics() end - def send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name) - send_message('delete_partition_column_statistics', Delete_partition_column_statistics_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name, :col_name => col_name) + def send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name, engine) + send_message('delete_partition_column_statistics', Delete_partition_column_statistics_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name, :col_name => col_name, :engine => engine) end def recv_delete_partition_column_statistics() @@ -1990,13 +1990,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'delete_partition_column_statistics failed: unknown result') end - def delete_table_column_statistics(db_name, tbl_name, col_name) - send_delete_table_column_statistics(db_name, tbl_name, col_name) + def delete_table_column_statistics(db_name, tbl_name, col_name, engine) + send_delete_table_column_statistics(db_name, tbl_name, col_name, engine) return recv_delete_table_column_statistics() end - def send_delete_table_column_statistics(db_name, tbl_name, col_name) - send_message('delete_table_column_statistics', 
Delete_table_column_statistics_args, :db_name => db_name, :tbl_name => tbl_name, :col_name => col_name) + def send_delete_table_column_statistics(db_name, tbl_name, col_name, engine) + send_message('delete_table_column_statistics', Delete_table_column_statistics_args, :db_name => db_name, :tbl_name => tbl_name, :col_name => col_name, :engine => engine) end def recv_delete_table_column_statistics() @@ -5315,7 +5315,7 @@ module ThriftHiveMetastore args = read_args(iprot, Delete_partition_column_statistics_args) result = Delete_partition_column_statistics_result.new() begin - result.success = @handler.delete_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name) + result.success = @handler.delete_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name, args.engine) rescue ::NoSuchObjectException => o1 result.o1 = o1 rescue ::MetaException => o2 @@ -5332,7 +5332,7 @@ module ThriftHiveMetastore args = read_args(iprot, Delete_table_column_statistics_args) result = Delete_table_column_statistics_result.new() begin - result.success = @handler.delete_table_column_statistics(args.db_name, args.tbl_name, args.col_name) + result.success = @handler.delete_table_column_statistics(args.db_name, args.tbl_name, args.col_name, args.engine) rescue ::NoSuchObjectException => o1 result.o1 = o1 rescue ::MetaException => o2 @@ -10990,12 +10990,14 @@ module ThriftHiveMetastore TBL_NAME = 2 PART_NAME = 3 COL_NAME = 4 + ENGINE = 5 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, PART_NAME => {:type => ::Thrift::Types::STRING, :name => 'part_name'}, - COL_NAME => {:type => ::Thrift::Types::STRING, :name => 'col_name'} + COL_NAME => {:type => ::Thrift::Types::STRING, :name => 'col_name'}, + ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine'} } def struct_fields; FIELDS; end @@ -11035,11 +11037,13 @@ module ThriftHiveMetastore DB_NAME = 1 TBL_NAME = 2 COL_NAME = 3 + ENGINE = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - COL_NAME => {:type => ::Thrift::Types::STRING, :name => 'col_name'} + COL_NAME => {:type => ::Thrift::Types::STRING, :name => 'col_name'}, + ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine'} } def struct_fields; FIELDS; end diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 153f4b8fd1..08f1b65519 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -925,19 +925,19 @@ public Partition exchange_partition(Map partitionSpecs, String s @Override public Map> getPartitionColumnStatistics( String dbName, String tableName, List partNames, List colNames, - String validWriteIdList) + String engine, String validWriteIdList) throws NoSuchObjectException, MetaException, TException { return getPartitionColumnStatistics(getDefaultCatalog(conf), dbName, tableName, - partNames, colNames, validWriteIdList); + partNames, colNames, engine, validWriteIdList); } @Override public Map> getPartitionColumnStatistics( String catName, String dbName, String tableName, List 
partNames, - List colNames, String validWriteIdList) + List colNames, String engine, String validWriteIdList) throws NoSuchObjectException, MetaException, TException { PartitionsStatsRequest rqst = new PartitionsStatsRequest(dbName, tableName, colNames, - partNames == null ? new ArrayList() : partNames); + partNames == null ? new ArrayList() : partNames, engine); rqst.setCatName(catName); rqst.setValidWriteIdList(validWriteIdList); return client.get_partitions_statistics_req(rqst).getPartStats(); @@ -945,20 +945,21 @@ public Partition exchange_partition(Map partitionSpecs, String s @Override public AggrStats getAggrColStatsFor(String dbName, String tblName, List colNames, - List partNames, String writeIdList) + List partNames, String engine, String writeIdList) throws NoSuchObjectException, MetaException, TException { return getAggrColStatsFor(getDefaultCatalog(conf), dbName, tblName, colNames, - partNames, writeIdList); } + partNames, engine, writeIdList); + } @Override public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, List colNames, - List partNames, String writeIdList) + List partNames, String engine, String writeIdList) throws NoSuchObjectException, MetaException, TException { if (colNames.isEmpty() || partNames.isEmpty()) { LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side."); return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate } - PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames); + PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames, engine); req.setCatName(catName); req.setValidWriteIdList(writeIdList); return client.get_aggr_stats_for(req); @@ -1854,26 +1855,29 @@ public Partition getPartition(String catName, String dbName, String tblName, @Override public List getPartitionsByNames(String db_name, String tbl_name, - List part_names, boolean getColStats) + List part_names, boolean getColStats, String engine) throws TException { - return getPartitionsByNames(getDefaultCatalog(conf), db_name, tbl_name, part_names, getColStats); + return getPartitionsByNames(getDefaultCatalog(conf), db_name, tbl_name, part_names, getColStats, engine); } @Override public List getPartitionsByNames(String catName, String db_name, String tbl_name, List part_names) throws TException { - return getPartitionsByNames(catName, db_name, tbl_name, part_names, false); + return getPartitionsByNames(catName, db_name, tbl_name, part_names, false, null); } @Override public List getPartitionsByNames(String catName, String db_name, String tbl_name, - List part_names, boolean getColStats) throws TException { + List part_names, boolean getColStats, String engine) throws TException { checkDbAndTableFilters(catName, db_name, tbl_name); GetPartitionsByNamesRequest gpbnr = new GetPartitionsByNamesRequest(prependCatalogToDbName(catName, db_name, conf), tbl_name); gpbnr.setNames(part_names); gpbnr.setGet_col_stats(getColStats); + if (getColStats) { + gpbnr.setEngine(engine); + } if (processorCapabilities != null) gpbnr.setProcessorCapabilities(Arrays.asList(processorCapabilities)); if (processorIdentifier != null) @@ -1921,20 +1925,24 @@ public Table getTable(String dbname, String name) throws TException { } @Override - public Table getTable(String dbname, String name, boolean getColumnStats) throws TException { - return getTable(getDefaultCatalog(conf), dbname, name, getColumnStats); + public Table getTable(String dbname, String name, boolean 
getColumnStats, String engine) throws TException { + return getTable(getDefaultCatalog(conf), dbname, name, getColumnStats, engine); } @Override public Table getTable(String catName, String dbName, String tableName) throws TException { - return getTable(catName, dbName, tableName, false); + return getTable(catName, dbName, tableName, false, null); } - public Table getTable(String catName, String dbName, String tableName, boolean getColumnStats) throws TException { + public Table getTable(String catName, String dbName, String tableName, + boolean getColumnStats, String engine) throws TException { GetTableRequest req = new GetTableRequest(dbName, tableName); req.setCatName(catName); req.setCapabilities(version); req.setGetColumnStats(getColumnStats); + if (getColumnStats) { + req.setEngine(engine); + } if (processorCapabilities != null) req.setProcessorCapabilities(Arrays.asList(processorCapabilities)); if (processorIdentifier != null) @@ -1947,17 +1955,20 @@ public Table getTable(String catName, String dbName, String tableName, boolean g @Override public Table getTable(String catName, String dbName, String tableName, String validWriteIdList) throws TException { - return getTable(catName, dbName, tableName, validWriteIdList, false); + return getTable(catName, dbName, tableName, validWriteIdList, false, null); } @Override public Table getTable(String catName, String dbName, String tableName, String validWriteIdList, - boolean getColumnStats) throws TException { + boolean getColumnStats, String engine) throws TException { GetTableRequest req = new GetTableRequest(dbName, tableName); req.setCatName(catName); req.setCapabilities(version); req.setValidWriteIdList(validWriteIdList); req.setGetColumnStats(getColumnStats); + if (getColumnStats) { + req.setEngine(engine); + } if (processorCapabilities != null) req.setProcessorCapabilities(Arrays.asList(processorCapabilities)); if (processorIdentifier != null) @@ -2371,6 +2382,7 @@ public boolean updateTableColumnStatistics(ColumnStatistics statsObj) throws TEx // Note: currently this method doesn't set txn properties and thus won't work on txn tables. SetPartitionsStatsRequest req = new SetPartitionsStatsRequest(); req.addToColStats(statsObj); + req.setEngine(statsObj.getEngine()); req.setNeedMerge(false); return client.update_table_column_statistics_req(req).isResult(); } @@ -2383,6 +2395,7 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) throws // Note: currently this method doesn't set txn properties and thus won't work on txn tables. 
SetPartitionsStatsRequest req = new SetPartitionsStatsRequest(); req.addToColStats(statsObj); + req.setEngine(statsObj.getEngine()); req.setNeedMerge(false); return client.update_partition_column_statistics_req(req).isResult(); } @@ -2410,81 +2423,78 @@ public void flushCache() { @Override public List getTableColumnStatistics(String dbName, String tableName, - List colNames) throws TException { - return getTableColumnStatistics(getDefaultCatalog(conf), dbName, tableName, colNames); + List colNames, String engine) throws TException { + return getTableColumnStatistics(getDefaultCatalog(conf), dbName, tableName, colNames, engine); } @Override public List getTableColumnStatistics(String catName, String dbName, - String tableName, - List colNames) throws TException { - TableStatsRequest rqst = new TableStatsRequest(dbName, tableName, colNames); + String tableName, List colNames, String engine) throws TException { + TableStatsRequest rqst = new TableStatsRequest(dbName, tableName, colNames, engine); rqst.setCatName(catName); + rqst.setEngine(engine); return client.get_table_statistics_req(rqst).getTableStats(); } @Override public List getTableColumnStatistics(String dbName, String tableName, - List colNames, - String validWriteIdList) throws TException { + List colNames, String engine, String validWriteIdList) throws TException { return getTableColumnStatistics(getDefaultCatalog(conf), dbName, tableName, colNames, - validWriteIdList); + engine, validWriteIdList); } @Override public List getTableColumnStatistics(String catName, String dbName, - String tableName, - List colNames, - String validWriteIdList) throws TException { - TableStatsRequest rqst = new TableStatsRequest(dbName, tableName, colNames); + String tableName, List colNames, String engine, String validWriteIdList) throws TException { + TableStatsRequest rqst = new TableStatsRequest(dbName, tableName, colNames, engine); rqst.setCatName(catName); + rqst.setEngine(engine); rqst.setValidWriteIdList(validWriteIdList); return client.get_table_statistics_req(rqst).getTableStats(); } @Override public Map> getPartitionColumnStatistics( - String dbName, String tableName, List partNames, List colNames) + String dbName, String tableName, List partNames, List colNames, String engine) throws TException { - return getPartitionColumnStatistics(getDefaultCatalog(conf), dbName, tableName, partNames, colNames); + return getPartitionColumnStatistics(getDefaultCatalog(conf), dbName, tableName, partNames, colNames, engine); } @Override public Map> getPartitionColumnStatistics( String catName, String dbName, String tableName, List partNames, - List colNames) throws TException { + List colNames, String engine) throws TException { PartitionsStatsRequest rqst = new PartitionsStatsRequest(dbName, tableName, colNames, - partNames); + partNames, engine); rqst.setCatName(catName); return client.get_partitions_statistics_req(rqst).getPartStats(); } @Override public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, - String colName) throws TException { + String colName, String engine) throws TException { return deletePartitionColumnStatistics(getDefaultCatalog(conf), dbName, tableName, partName, - colName); + colName, engine); } @Override public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, - String partName, String colName) - throws TException { + String partName, String colName, String engine) throws TException { return 
client.delete_partition_column_statistics(prependCatalogToDbName(catName, dbName, conf), - tableName, partName, colName); + tableName, partName, colName, engine); } @Override - public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) + public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName, String engine) throws TException { - return deleteTableColumnStatistics(getDefaultCatalog(conf), dbName, tableName, colName); + return deleteTableColumnStatistics(getDefaultCatalog(conf), dbName, tableName, colName, engine); } @Override public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, - String colName) throws TException { + String colName, String engine) throws TException { return client.delete_table_column_statistics(prependCatalogToDbName(catName, dbName, conf), - tableName, colName); + tableName, colName, engine); } @Override @@ -3434,18 +3444,18 @@ protected void drop_table_with_environment_context(String catName, String dbname @Override public AggrStats getAggrColStatsFor(String dbName, String tblName, - List colNames, List partNames) throws NoSuchObjectException, MetaException, TException { - return getAggrColStatsFor(getDefaultCatalog(conf), dbName, tblName, colNames, partNames); + List colNames, List partNames, String engine) throws NoSuchObjectException, MetaException, TException { + return getAggrColStatsFor(getDefaultCatalog(conf), dbName, tblName, colNames, partNames, engine); } @Override public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, - List colNames, List partNames) throws TException { + List colNames, List partNames, String engine) throws TException { if (colNames.isEmpty() || partNames.isEmpty()) { LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side."); return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate } - PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames); + PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames, engine); req.setCatName(catName); return client.get_aggr_stats_for(req); } diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index aa7e8dfcbd..014131efdb 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -760,6 +760,7 @@ Table getTable(String dbName, String tableName) throws MetaException, * Name of the table to fetch. * @param getColumnStats * get the column stats, if available, when true + * @param engine engine sending the request * @return An object representing the table. * @throws MetaException * Could not fetch the table @@ -768,7 +769,7 @@ Table getTable(String dbName, String tableName) throws MetaException, * @throws NoSuchObjectException * In case the table wasn't found. */ - Table getTable(String dbName, String tableName, boolean getColumnStats) throws MetaException, + Table getTable(String dbName, String tableName, boolean getColumnStats, String engine) throws MetaException, TException, NoSuchObjectException; /** * Get a table object. 
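On the Java client the new argument threads through every stats-bearing entry point, and both getTable and getPartitionsByNames forward it only when column statistics were actually requested (the setEngine call is guarded by getColStats). A hedged sketch of the updated call shapes; client is assumed to be an already-connected IMetaStoreClient, and all names plus the "hive" engine value are placeholders:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;

class EngineAwareReads {
  static void demo(IMetaStoreClient client) throws Exception {
    // Fetch a table and, because getColumnStats is true, its stats for one engine.
    Table t = client.getTable("default", "t1", true, "hive");

    // Same idea for partitions fetched by name.
    List<Partition> parts = client.getPartitionsByNames(
        "default", "t1", Arrays.asList("ds=2020-01-01"), true, "hive");

    // Table-level column statistics are now scoped per engine.
    List<ColumnStatisticsObj> stats =
        client.getTableColumnStatistics("default", "t1", Arrays.asList("c1"), "hive");

    // Deletes take the engine too; per the interface javadoc, a null engine
    // drops the statistics for all engines.
    client.deleteTableColumnStatistics("default", "t1", "c1", "hive");
    client.deletePartitionColumnStatistics("default", "t1", "ds=2020-01-01", "c1", "hive");
  }
}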
@@ -801,12 +802,13 @@ Table getTable(String catName, String dbName, String tableName, * @param tableName table name. * @param validWriteIdList applicable snapshot * @param getColumnStats get the column stats, if available, when true + * @param engine engine sending the request * @return table object. * @throws MetaException Something went wrong, usually in the RDBMS. * @throws TException general thrift error. */ Table getTable(String catName, String dbName, String tableName, - String validWriteIdList, boolean getColumnStats) throws TException; + String validWriteIdList, boolean getColumnStats, String engine) throws TException; /** * Get tables as objects (rather than just fetching their names). This is more expensive and @@ -1510,13 +1512,14 @@ boolean listPartitionsByExpr(String catName, String db_name, String tbl_name, by * @param tbl_name table name * @param part_names list of partition names * @param getColStats if true include statistics in the Partition object + * @param engine engine sending the request * @return list of Partition objects * @throws NoSuchObjectException No such partitions * @throws MetaException error accessing the RDBMS. * @throws TException thrift transport error */ List getPartitionsByNames(String db_name, String tbl_name, List part_names, - boolean getColStats) throws NoSuchObjectException, MetaException, TException; + boolean getColStats, String engine) throws NoSuchObjectException, MetaException, TException; /** * Get partitions by a list of partition names. @@ -1540,13 +1543,14 @@ boolean listPartitionsByExpr(String catName, String db_name, String tbl_name, by * @param tbl_name table name * @param part_names list of partition names * @param getColStats if true, column statistics is added to the Partition objects + * @param engine engine sending the request * @return list of Partition objects * @throws NoSuchObjectException No such partitions * @throws MetaException error accessing the RDBMS. 
* @throws TException thrift transport error */ List getPartitionsByNames(String catName, String db_name, String tbl_name, - List part_names, boolean getColStats) + List part_names, boolean getColStats, String engine) throws NoSuchObjectException, MetaException, TException; /** @@ -2474,17 +2478,17 @@ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) * @param dbName database name * @param tableName table name * @param colNames list of column names + * @param engine engine sending the request * @return list of column statistics objects, one per column * @throws NoSuchObjectException no such table * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ List getTableColumnStatistics(String dbName, String tableName, - List colNames) throws NoSuchObjectException, MetaException, TException; + List colNames, String engine) throws NoSuchObjectException, MetaException, TException; List getTableColumnStatistics(String dbName, String tableName, - List colNames, - String validWriteIdList) + List colNames, String engine, String validWriteIdList) throws NoSuchObjectException, MetaException, TException; /** @@ -2495,18 +2499,17 @@ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) * @param dbName database name * @param tableName table name * @param colNames list of column names + * @param engine engine sending the request * @return list of column statistics objects, one per column * @throws NoSuchObjectException no such table * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ List getTableColumnStatistics(String catName, String dbName, String tableName, - List colNames) - throws NoSuchObjectException, MetaException, TException; + List colNames, String engine) throws NoSuchObjectException, MetaException, TException; List getTableColumnStatistics(String catName, String dbName, String tableName, - List colNames, - String validWriteIdList) + List colNames, String engine, String validWriteIdList) throws NoSuchObjectException, MetaException, TException; /** * Get the column statistics for a set of columns in a partition. @@ -2515,18 +2518,19 @@ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) * @param partNames partition names. Since these are names they should be of the form * "key1=value1[/key2=value2...]" * @param colNames list of column names + * @param engine engine sending the request * @return map of columns to statistics * @throws NoSuchObjectException no such partition * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ Map> getPartitionColumnStatistics(String dbName, - String tableName, List partNames, List colNames) + String tableName, List partNames, List colNames, String engine) throws NoSuchObjectException, MetaException, TException; Map> getPartitionColumnStatistics(String dbName, String tableName, List partNames, List colNames, - String validWriteIdList) + String engine, String validWriteIdList) throws NoSuchObjectException, MetaException, TException; /** @@ -2537,19 +2541,20 @@ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) * @param partNames partition names. 
Since these are names they should be of the form * "key1=value1[/key2=value2...]" * @param colNames list of column names + * @param engine engine sending the request * @return map of columns to statistics * @throws NoSuchObjectException no such partition * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ Map> getPartitionColumnStatistics( - String catName, String dbName, String tableName, List partNames, List colNames) - throws NoSuchObjectException, MetaException, TException; + String catName, String dbName, String tableName, List partNames, List colNames, + String engine) throws NoSuchObjectException, MetaException, TException; Map> getPartitionColumnStatistics( String catName, String dbName, String tableName, List partNames, List colNames, - String validWriteIdList) + String engine, String validWriteIdList) throws NoSuchObjectException, MetaException, TException; /** * Delete partition level column statistics given dbName, tableName, partName and colName, or @@ -2558,6 +2563,7 @@ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) * @param tableName table name. * @param partName partition name. * @param colName column name, or null for all columns + * @param engine engine, or null for all engines * @return boolean indicating outcome of the operation * @throws NoSuchObjectException no such partition exists * @throws InvalidObjectException error dropping the stats data @@ -2566,7 +2572,7 @@ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) * @throws InvalidInputException input is invalid or null. */ boolean deletePartitionColumnStatistics(String dbName, String tableName, - String partName, String colName) throws NoSuchObjectException, MetaException, + String partName, String colName, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException; /** @@ -2577,6 +2583,7 @@ boolean deletePartitionColumnStatistics(String dbName, String tableName, * @param tableName table name. * @param partName partition name. * @param colName column name, or null for all columns + * @param engine engine, or null for all engines * @return boolean indicating outcome of the operation * @throws NoSuchObjectException no such partition exists * @throws InvalidObjectException error dropping the stats data @@ -2585,7 +2592,7 @@ boolean deletePartitionColumnStatistics(String dbName, String tableName, * @throws InvalidInputException input is invalid or null. */ boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, - String partName, String colName) + String partName, String colName, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException; /** @@ -2594,6 +2601,7 @@ boolean deletePartitionColumnStatistics(String catName, String dbName, String ta * @param dbName database name * @param tableName table name * @param colName column name, or null to drop stats for all columns + * @param engine engine, or null for all engines * @return boolean indicating the outcome of the operation * @throws NoSuchObjectException No such table * @throws MetaException error accessing the RDBMS @@ -2601,7 +2609,7 @@ boolean deletePartitionColumnStatistics(String catName, String dbName, String ta * @throws TException thrift transport error * @throws InvalidInputException bad input, like a null table name. 
*/ - boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws + boolean deleteTableColumnStatistics(String dbName, String tableName, String colName, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException; /** @@ -2611,6 +2619,7 @@ boolean deleteTableColumnStatistics(String dbName, String tableName, String colN * @param dbName database name * @param tableName table name * @param colName column name, or null to drop stats for all columns + * @param engine engine, or null for all engines * @return boolean indicating the outcome of the operation * @throws NoSuchObjectException No such table * @throws MetaException error accessing the RDBMS @@ -2618,7 +2627,7 @@ boolean deleteTableColumnStatistics(String dbName, String tableName, String colN * @throws TException thrift transport error * @throws InvalidInputException bad input, like a null table name. */ - boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, String colName) + boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, String colName, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException; /** @@ -3435,17 +3444,18 @@ GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( * @param tblName table name * @param colNames list of column names * @param partName list of partition names (not values). + * @param engine engine sending the request * @return aggregated stats for requested partitions * @throws NoSuchObjectException no such table * @throws MetaException error accessing the RDBMS * @throws TException thrift transport exception */ AggrStats getAggrColStatsFor(String dbName, String tblName, - List colNames, List partName) throws NoSuchObjectException, MetaException, TException; + List colNames, List partName, String engine) throws NoSuchObjectException, MetaException, TException; AggrStats getAggrColStatsFor(String dbName, String tblName, List colNames, List partName, - String writeIdList) throws NoSuchObjectException, MetaException, TException; + String engine, String writeIdList) throws NoSuchObjectException, MetaException, TException; /** * Get aggregated column stats for a set of partitions. @@ -3454,18 +3464,20 @@ AggrStats getAggrColStatsFor(String dbName, String tblName, * @param tblName table name * @param colNames list of column names * @param partNames list of partition names (not values). + * @param engine engine sending the request * @return aggregated stats for requested partitions * @throws NoSuchObjectException no such table * @throws MetaException error accessing the RDBMS * @throws TException thrift transport exception */ AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, - List colNames, List partNames) + List colNames, List partNames, + String engine) throws NoSuchObjectException, MetaException, TException; AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, List colNames, List partNames, - String writeIdList) + String engine, String writeIdList) throws NoSuchObjectException, MetaException, TException; /** * Set table or partition column statistics. 
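The aggregate and write paths close the loop: getAggrColStatsFor gains the same trailing engine argument, while updateTableColumnStatistics and updatePartitionColumnStatistics take no new parameter because the client copies statsObj.getEngine() into the SetPartitionsStatsRequest it builds. A sketch under the same assumptions as above:

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.AggrStats;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;

class EngineAwareAggrAndWrite {
  static void demo(IMetaStoreClient client, ColumnStatistics statsObj) throws Exception {
    // Aggregated stats across a set of partitions, scoped to one engine.
    AggrStats aggr = client.getAggrColStatsFor(
        "default", "t1", Arrays.asList("c1"),
        Arrays.asList("ds=2020-01-01", "ds=2020-01-02"), "hive");

    // On write, the engine rides inside the ColumnStatistics struct itself;
    // the client lifts it into the request via req.setEngine(statsObj.getEngine()).
    statsObj.setEngine("hive");
    client.updateTableColumnStatistics(statsObj);
  }
}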
diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift index ca46a8bb3b..e56c959a37 100644 --- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift +++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift @@ -588,8 +588,9 @@ struct ColumnStatisticsDesc { struct ColumnStatistics { 1: required ColumnStatisticsDesc statsDesc, 2: required list statsObj, -3: optional bool isStatsCompliant // Are the stats isolation-level-compliant with the +3: optional bool isStatsCompliant, // Are the stats isolation-level-compliant with the // the calling query? +4: required string engine } struct PartitionListComposingSpec { @@ -617,7 +618,8 @@ struct SetPartitionsStatsRequest { 1: required list colStats, 2: optional bool needMerge, //stats need to be merged with the existing stats 3: optional i64 writeId=-1, // writeId for the current query that updates the stats -4: optional string validWriteIdList // valid write id list for the table for which this struct is being sent +4: optional string validWriteIdList, // valid write id list for the table for which this struct is being sent +5: required string engine //engine creating the current request } struct SetPartitionsStatsResponse { @@ -764,7 +766,8 @@ struct TableStatsRequest { 2: required string tblName, 3: required list colNames 4: optional string catName, - 5: optional string validWriteIdList // valid write id list for the table for which this struct is being sent + 5: optional string validWriteIdList, // valid write id list for the table for which this struct is being sent + 6: required string engine //engine creating the current request } struct PartitionsStatsRequest { @@ -773,7 +776,8 @@ struct PartitionsStatsRequest { 3: required list colNames, 4: required list partNames, 5: optional string catName, - 6: optional string validWriteIdList // valid write id list for the table for which this struct is being sent + 6: optional string validWriteIdList, // valid write id list for the table for which this struct is being sent + 7: required string engine //engine creating the current request } // Return type for add_partitions_req @@ -848,7 +852,8 @@ struct GetPartitionsByNamesRequest { 3: optional list names, 4: optional bool get_col_stats, 5: optional list processorCapabilities, - 6: optional string processorIdentifier + 6: optional string processorIdentifier, + 7: optional string engine } struct GetPartitionsByNamesResult { @@ -1356,7 +1361,8 @@ struct GetTableRequest { 6: optional string validWriteIdList, 7: optional bool getColumnStats, 8: optional list processorCapabilities, - 9: optional string processorIdentifier + 9: optional string processorIdentifier, + 10: optional string engine } struct GetTableResult { @@ -1942,8 +1948,8 @@ service ThriftHiveMetastore extends fb303.FacebookService 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4) void create_table_with_constraints(1:Table tbl, 2: list primaryKeys, 3: list foreignKeys, - 4: list uniqueConstraints, 5: list notNullConstraints, - 6: list defaultConstraints, 7: list checkConstraints) + 4: list uniqueConstraints, 5: list notNullConstraints, + 6: list defaultConstraints, 7: list checkConstraints) throws (1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4) @@ -2270,10 +2276,10 @@ service ThriftHiveMetastore extends fb303.FacebookService // delete APIs attempt to 
delete column statistics, if found, associated with a given db_name, tbl_name, [part_name] // and col_name. If the delete API doesn't find the statistics record in the metastore, throws NoSuchObjectException // Delete API validates the input and if the input is invalid throws InvalidInputException/InvalidObjectException. - bool delete_partition_column_statistics(1:string db_name, 2:string tbl_name, 3:string part_name, 4:string col_name) throws + bool delete_partition_column_statistics(1:string db_name, 2:string tbl_name, 3:string part_name, 4:string col_name, 5:string engine) throws (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidObjectException o3, 4:InvalidInputException o4) - bool delete_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name) throws + bool delete_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name, 4:string engine) throws (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidObjectException o3, 4:InvalidInputException o4) diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index 97564255d2..9e7a7e696c 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -18,8 +18,10 @@ package org.apache.hadoop.hive.metastore; import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.Lists; +import com.google.common.collect.Multimap; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.hive.common.repl.ReplConst; import org.apache.hadoop.hive.common.TableName; @@ -289,7 +291,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam // also the location field in partition parts = msdb.getPartitions(catName, dbname, name, -1); - Map columnStatsNeedUpdated = new HashMap<>(); + Multimap columnStatsNeedUpdated = ArrayListMultimap.create(); for (Partition part : parts) { String oldPartLoc = part.getSd().getLocation(); if (dataWasMoved && oldPartLoc.contains(oldTblLocPath)) { @@ -300,9 +302,9 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam } part.setDbName(newDbName); part.setTableName(newTblName); - ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, + List multiColStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, part.getValues(), part.getSd().getCols(), oldt, part, null, null); - if (colStats != null) { + for (ColumnStatistics colStats : multiColStats) { columnStatsNeedUpdated.put(part, colStats); } } @@ -330,7 +332,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam } } - for (Entry partColStats : columnStatsNeedUpdated.entrySet()) { + for (Entry partColStats : columnStatsNeedUpdated.entries()) { ColumnStatistics newPartColStats = partColStats.getValue(); newPartColStats.getStatsDesc().setDbName(newDbName); newPartColStats.getStatsDesc().setTableName(newTblName); @@ -358,9 +360,9 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam Partition oldPart = new Partition(part); List oldCols = part.getSd().getCols(); part.getSd().setCols(newt.getSd().getCols()); - ColumnStatistics colStats = 
updateOrGetPartitionColumnStats(msdb, catName, dbname, name, + List colStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, part.getValues(), oldCols, oldt, part, null, null); - assert(colStats == null); + assert(colStats.isEmpty()); if (cascade) { msdb.alterPartition( catName, dbname, name, part.getValues(), part, writeIdList); @@ -665,18 +667,20 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str } String newPartName = Warehouse.makePartName(tbl.getPartitionKeys(), new_part.getValues()); - ColumnStatistics cs = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldPart.getValues(), + List multiColumnStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldPart.getValues(), oldPart.getSd().getCols(), tbl, new_part, null, null); msdb.alterPartition(catName, dbname, name, part_vals, new_part, validWriteIds); - if (cs != null) { - cs.getStatsDesc().setPartName(newPartName); - try { - msdb.updatePartitionColumnStatistics(cs, new_part.getValues(), - validWriteIds, new_part.getWriteId()); - } catch (InvalidInputException iie) { - throw new InvalidOperationException("Unable to update partition stats in table rename." + iie); - } catch (NoSuchObjectException nsoe) { - // It is ok, ignore + if (!multiColumnStats.isEmpty()) { + for (ColumnStatistics cs : multiColumnStats) { + cs.getStatsDesc().setPartName(newPartName); + try { + msdb.updatePartitionColumnStatistics(cs, new_part.getValues(), + validWriteIds, new_part.getWriteId()); + } catch (InvalidInputException iie) { + throw new InvalidOperationException("Unable to update partition stats in table rename." + iie); + } catch (NoSuchObjectException nsoe) { + // It is ok, ignore + } } } @@ -924,7 +928,7 @@ private Path constructRenamedPath(Path defaultNewPath, Path currentPath) { } @VisibleForTesting - public static List alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTable, + public static List alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTable, EnvironmentContext ec, String validWriteIds, Configuration conf, List deletedCols) throws MetaException, InvalidObjectException { String catName = normalizeIdentifier(oldTable.isSetCatName() ? oldTable.getCatName() : @@ -933,14 +937,14 @@ private Path constructRenamedPath(Path defaultNewPath, Path currentPath) { String tableName = normalizeIdentifier(oldTable.getTableName()); String newDbName = newTable.getDbName().toLowerCase(); String newTableName = normalizeIdentifier(newTable.getTableName()); - List newStatsObjs = new ArrayList<>(); //if its not called from cahced store then update the table boolean doAlterTable = deletedCols == null; + List newMultiColStats = new ArrayList<>(); try { List oldCols = oldTable.getSd().getCols(); List newCols = newTable.getSd().getCols(); - ColumnStatistics colStats = null; + List multiColStats = null; boolean updateColumnStats = !newDbName.equals(dbName) || !newTableName.equals(tableName) || !MetaStoreServerUtils.columnsIncludedByNameType(oldCols, newCols); // Don't bother in the case of ACID conversion. @@ -955,43 +959,52 @@ private Path constructRenamedPath(Path defaultNewPath, Path currentPath) { // NOTE: this doesn't check stats being compliant, but the alterTable call below does. // The worst we can do is delete the stats. // Collect column stats which need to be rewritten and remove old stats. 
- colStats = msdb.getTableColumnStatistics(catName, dbName, tableName, oldColNames); - if (colStats == null) { + multiColStats = msdb.getTableColumnStatistics(catName, dbName, tableName, oldColNames); + if (multiColStats.isEmpty()) { updateColumnStats = false; } else { - List statsObjs = colStats.getStatsObj(); - if (statsObjs != null) { - // for out para, this value is initialized by caller. - if (deletedCols == null) { - deletedCols = new ArrayList<>(); - } - for (ColumnStatisticsObj statsObj : statsObjs) { - boolean found = false; - for (FieldSchema newCol : newCols) { - if (statsObj.getColName().equalsIgnoreCase(newCol.getName()) - && statsObj.getColType().equalsIgnoreCase(newCol.getType())) { - found = true; - break; - } + for (ColumnStatistics colStats : multiColStats) { + List statsObjs = colStats.getStatsObj(); + if (statsObjs != null) { + // for out para, this value is initialized by caller. + if (deletedCols == null) { + deletedCols = new ArrayList<>(); } + List newStatsObjs = new ArrayList<>(); + for (ColumnStatisticsObj statsObj : statsObjs) { + boolean found = false; + for (FieldSchema newCol : newCols) { + if (statsObj.getColName().equalsIgnoreCase(newCol.getName()) + && statsObj.getColType().equalsIgnoreCase(newCol.getType())) { + found = true; + break; + } + } - if (found) { - if (!newDbName.equals(dbName) || !newTableName.equals(tableName)) { + if (found) { + if (!newDbName.equals(dbName) || !newTableName.equals(tableName)) { + if (doAlterTable) { + msdb.deleteTableColumnStatistics(catName, dbName, tableName, statsObj.getColName(), colStats.getEngine()); + } + newStatsObjs.add(statsObj); + deletedCols.add(statsObj.getColName()); + } + } else { if (doAlterTable) { - msdb.deleteTableColumnStatistics(catName, dbName, tableName, statsObj.getColName()); + msdb.deleteTableColumnStatistics(catName, dbName, tableName, statsObj.getColName(), colStats.getEngine()); } - newStatsObjs.add(statsObj); deletedCols.add(statsObj.getColName()); } - } else { - if (doAlterTable) { - msdb.deleteTableColumnStatistics(catName, dbName, tableName, statsObj.getColName()); - } - deletedCols.add(statsObj.getColName()); } - } - if (doAlterTable) { - StatsSetupConst.removeColumnStatsState(newTable.getParameters(), deletedCols); + if (doAlterTable) { + StatsSetupConst.removeColumnStatsState(newTable.getParameters(), deletedCols); + // Change stats + ColumnStatisticsDesc statsDesc = colStats.getStatsDesc(); + statsDesc.setDbName(newDbName); + statsDesc.setTableName(newTableName); + colStats.setStatsObj(newStatsObjs); + newMultiColStats.add(colStats); + } } } } @@ -1000,12 +1013,10 @@ private Path constructRenamedPath(Path defaultNewPath, Path currentPath) { if (doAlterTable) { // Change to new table and append stats for the new table msdb.alterTable(catName, dbName, tableName, newTable, validWriteIds); - if (updateColumnStats && !newStatsObjs.isEmpty()) { - ColumnStatisticsDesc statsDesc = colStats.getStatsDesc(); - statsDesc.setDbName(newDbName); - statsDesc.setTableName(newTableName); - colStats.setStatsObj(newStatsObjs); - msdb.updateTableColumnStatistics(colStats, validWriteIds, newTable.getWriteId()); + if (updateColumnStats) { + for (ColumnStatistics colStats : newMultiColStats) { + msdb.updateTableColumnStatistics(colStats, validWriteIds, newTable.getWriteId()); + } } } } catch (NoSuchObjectException nsoe) { @@ -1014,14 +1025,14 @@ private Path constructRenamedPath(Path defaultNewPath, Path currentPath) { //should not happen since the input were verified before passed in throw new 
InvalidObjectException("Invalid inputs to update table column stats: " + e); } - return newStatsObjs; + return newMultiColStats; } - public static ColumnStatistics updateOrGetPartitionColumnStats( + public static List updateOrGetPartitionColumnStats( RawStore msdb, String catName, String dbname, String tblname, List partVals, List oldCols, Table table, Partition part, List newCols, List deletedCols) throws MetaException, InvalidObjectException { - ColumnStatistics newPartsColStats = null; + List newPartsColStats = new ArrayList<>(); boolean updateColumnStats = true; try { // if newCols are not specified, use default ones. @@ -1043,53 +1054,56 @@ public static ColumnStatistics updateOrGetPartitionColumnStats( } List oldPartNames = Lists.newArrayList(oldPartName); // TODO: doesn't take txn stats into account. This method can only remove stats. - List partsColStats = msdb.getPartitionColumnStatistics(catName, dbname, tblname, + List> multiPartsColStats = msdb.getPartitionColumnStatistics(catName, dbname, tblname, oldPartNames, oldColNames); - assert (partsColStats.size() <= 1); + for (List partsColStats : multiPartsColStats) { + assert (partsColStats.size() <= 1); - // for out para, this value is initialized by caller. - if (deletedCols == null) { - deletedCols = new ArrayList<>(); - } else { - // in case deletedCols is provided by caller, stats will be updated by caller. - updateColumnStats = false; - } - for (ColumnStatistics partColStats : partsColStats) { //actually only at most one loop - List newStatsObjs = new ArrayList<>(); - List statsObjs = partColStats.getStatsObj(); - for (ColumnStatisticsObj statsObj : statsObjs) { - boolean found =false; - for (FieldSchema newCol : newCols) { - if (statsObj.getColName().equalsIgnoreCase(newCol.getName()) - && statsObj.getColType().equalsIgnoreCase(newCol.getType())) { - found = true; - break; + // for out para, this value is initialized by caller. + if (deletedCols == null) { + deletedCols = new ArrayList<>(); + } else { + // in case deletedCols is provided by caller, stats will be updated by caller. 
+ updateColumnStats = false; + } + for (ColumnStatistics partColStats : partsColStats) { //actually only at most one loop + List newStatsObjs = new ArrayList<>(); + List statsObjs = partColStats.getStatsObj(); + for (ColumnStatisticsObj statsObj : statsObjs) { + boolean found = false; + for (FieldSchema newCol : newCols) { + if (statsObj.getColName().equalsIgnoreCase(newCol.getName()) + && statsObj.getColType().equalsIgnoreCase(newCol.getType())) { + found = true; + break; + } } - } - if (found) { - if (rename) { + if (found) { + if (rename) { + if (updateColumnStats) { + msdb.deletePartitionColumnStatistics(catName, dbname, tblname, + partColStats.getStatsDesc().getPartName(), partVals, statsObj.getColName(), + partColStats.getEngine()); + } else { + deletedCols.add(statsObj.getColName()); + } + newStatsObjs.add(statsObj); + } + } else { if (updateColumnStats) { - msdb.deletePartitionColumnStatistics(catName, dbname, tblname, - partColStats.getStatsDesc().getPartName(), partVals, statsObj.getColName()); - } else { - deletedCols.add(statsObj.getColName()); + msdb.deletePartitionColumnStatistics(catName, dbname, tblname, partColStats.getStatsDesc().getPartName(), + partVals, statsObj.getColName(), partColStats.getEngine()); } - newStatsObjs.add(statsObj); - } - } else { - if (updateColumnStats) { - msdb.deletePartitionColumnStatistics(catName, dbname, tblname, partColStats.getStatsDesc().getPartName(), - partVals, statsObj.getColName()); + deletedCols.add(statsObj.getColName()); } - deletedCols.add(statsObj.getColName()); } - } - if (updateColumnStats) { - StatsSetupConst.removeColumnStatsState(part.getParameters(), deletedCols); - } - if (!newStatsObjs.isEmpty()) { - partColStats.setStatsObj(newStatsObjs); - newPartsColStats = partColStats; + if (updateColumnStats) { + StatsSetupConst.removeColumnStatsState(part.getParameters(), deletedCols); + } + if (!newStatsObjs.isEmpty()) { + partColStats.setStatsObj(newStatsObjs); + newPartsColStats.add(partColStats); + } } } } catch (NoSuchObjectException nsoe) { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 7e97f8d9dd..ee7368e171 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -3062,7 +3062,7 @@ public Table get_table(final String dbname, final String name) throws MetaExcept NoSuchObjectException { String[] parsedDbName = parseDbName(dbname, conf); return getTableInternal( - parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, null, null, false); + parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, null, null, false, null); } @Override @@ -3136,18 +3136,18 @@ public GetTableResult get_table_req(GetTableRequest req) throws MetaException, NoSuchObjectException { String catName = req.isSetCatName() ? 
req.getCatName() : getDefaultCatalog(conf); return new GetTableResult(getTableInternal(catName, req.getDbName(), req.getTblName(), - req.getCapabilities(), req.getValidWriteIdList(), req.isGetColumnStats(), + req.getCapabilities(), req.getValidWriteIdList(), req.isGetColumnStats(), req.getEngine(), req.getProcessorCapabilities(), req.getProcessorIdentifier())); } private Table getTableInternal(String catName, String dbname, String name, - ClientCapabilities capabilities, String writeIdList, boolean getColumnStats) + ClientCapabilities capabilities, String writeIdList, boolean getColumnStats, String engine) throws MetaException, NoSuchObjectException { - return getTableInternal(catName, dbname, name, capabilities, writeIdList, getColumnStats, null, null); + return getTableInternal(catName, dbname, name, capabilities, writeIdList, getColumnStats, engine, null, null); } private Table getTableInternal(String catName, String dbname, String name, - ClientCapabilities capabilities, String writeIdList, boolean getColumnStats, + ClientCapabilities capabilities, String writeIdList, boolean getColumnStats, String engine, List processorCapabilities, String processorId) throws MetaException, NoSuchObjectException { if (isInTest) { @@ -3159,7 +3159,7 @@ private Table getTableInternal(String catName, String dbname, String name, startTableFunction("get_table", catName, dbname, name); Exception ex = null; try { - t = get_table_core(catName, dbname, name, writeIdList, getColumnStats); + t = get_table_core(catName, dbname, name, writeIdList, getColumnStats, engine); if (MetaStoreUtils.isInsertOnlyTableParam(t.getParameters())) { assertClientHasCapability(capabilities, ClientCapability.INSERT_ONLY_TABLES, "insert-only tables", "get_table_req"); @@ -3228,14 +3228,14 @@ public Table get_table_core( final String name, final String writeIdList) throws MetaException, NoSuchObjectException { - return get_table_core(catName, dbname, name, writeIdList, false); + return get_table_core(catName, dbname, name, writeIdList, false, null); } public Table get_table_core(final String catName, final String dbname, final String name, final String writeIdList, - boolean getColumnStats) + boolean getColumnStats, String engine) throws MetaException, NoSuchObjectException { Table t = null; try { @@ -3248,7 +3248,7 @@ public Table get_table_core(final String catName, // If column statistics was requested and is valid fetch it. if (getColumnStats) { ColumnStatistics colStats = getMS().getTableColumnStatistics(catName, dbname, name, - StatsSetupConst.getColumnsHavingStats(t.getParameters()), writeIdList); + StatsSetupConst.getColumnsHavingStats(t.getParameters()), engine, writeIdList); if (colStats != null) { t.setColStats(colStats); } @@ -6067,7 +6067,7 @@ public ColumnStatistics get_table_column_statistics(String dbName, String tableN try { statsObj = getMS().getTableColumnStatistics( parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, Lists.newArrayList(colName), - null); + "hive", null); if (statsObj != null) { assert statsObj.getStatsObjSize() <= 1; } @@ -6093,7 +6093,7 @@ public TableStatsResult get_table_statistics_req(TableStatsRequest request) thro try { ColumnStatistics cs = getMS().getTableColumnStatistics( catName, dbName, tblName, lowerCaseColNames, - request.getValidWriteIdList()); + request.getEngine(), request.getValidWriteIdList()); // Note: stats compliance is not propagated to the client; instead, we just return nothing // if stats are not compliant for now. 
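The request objects read above (GetTableRequest, TableStatsRequest) gain an engine field, while the legacy single-column API pins it to the "hive" literal. A hedged client-side sketch of populating the new field; the database, table, and column names are placeholders, and the constructor shape follows the pre-patch generated Thrift code (if engine is declared required, it would move into the constructor instead):

import java.util.Arrays;

import org.apache.hadoop.hive.conf.Constants;
import org.apache.hadoop.hive.metastore.api.TableStatsRequest;

public class StatsRequestSketch {
  static TableStatsRequest hiveScopedRequest() {
    // Required Thrift fields first: db, table, columns (placeholder names).
    TableStatsRequest req =
        new TableStatsRequest("default", "sample_tbl", Arrays.asList("id", "name"));
    // Engine added by this patch; Constants.HIVE_ENGINE is the "hive"
    // literal introduced at the top of the diff.
    req.setEngine(Constants.HIVE_ENGINE);
    return req;
  }
}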
This won't work for stats merging, but that // is currently only done on metastore size (see set_aggr...). @@ -6127,7 +6127,8 @@ public ColumnStatistics get_partition_column_statistics(String dbName, String ta try { List list = getMS().getPartitionColumnStatistics( parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, - Lists.newArrayList(convertedPartName), Lists.newArrayList(colName)); + Lists.newArrayList(convertedPartName), Lists.newArrayList(colName), + "hive"); if (list.isEmpty()) { return null; } @@ -6162,7 +6163,7 @@ public PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsReques try { List stats = getMS().getPartitionColumnStatistics( catName, dbName, tblName, lowerCasePartNames, lowerCaseColNames, - request.isSetValidWriteIdList() ? request.getValidWriteIdList() : null); + request.getEngine(), request.isSetValidWriteIdList() ? request.getValidWriteIdList() : null); Map> map = new HashMap<>(); if (stats != null) { for (ColumnStatistics stat : stats) { @@ -6339,7 +6340,7 @@ public SetPartitionsStatsResponse update_partition_column_statistics_req( @Override public boolean delete_partition_column_statistics(String dbName, String tableName, - String partName, String colName) throws TException { + String partName, String colName, String engine) throws TException { dbName = dbName.toLowerCase(); String[] parsedDbName = parseDbName(dbName, conf); tableName = tableName.toLowerCase(); @@ -6363,19 +6364,19 @@ public boolean delete_partition_column_statistics(String dbName, String tableNam } ret = getMS().deletePartitionColumnStatistics(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, - convertedPartName, partVals, colName); + convertedPartName, partVals, colName, engine); if (ret) { if (transactionalListeners != null && !transactionalListeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.DELETE_PARTITION_COLUMN_STAT, new DeletePartitionColumnStatEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, - convertedPartName, partVals, colName, this)); + convertedPartName, partVals, colName, engine, this)); } if (!listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.DELETE_PARTITION_COLUMN_STAT, new DeletePartitionColumnStatEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, - convertedPartName, partVals, colName, this)); + convertedPartName, partVals, colName, engine, this)); } } committed = getMS().commitTransaction(); @@ -6389,7 +6390,7 @@ public boolean delete_partition_column_statistics(String dbName, String tableNam } @Override - public boolean delete_table_column_statistics(String dbName, String tableName, String colName) + public boolean delete_table_column_statistics(String dbName, String tableName, String colName, String engine) throws TException { dbName = dbName.toLowerCase(); tableName = tableName.toLowerCase(); @@ -6414,19 +6415,19 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S throw new MetaException("Cannot delete stats via this API for a transactional table"); } - ret = getMS().deleteTableColumnStatistics(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, colName); + ret = getMS().deleteTableColumnStatistics(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, colName, engine); if (ret) { if (transactionalListeners != null && !transactionalListeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.DELETE_TABLE_COLUMN_STAT, new DeleteTableColumnStatEvent(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], - tableName, colName, this)); + tableName, colName, engine, this)); } if (!listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.DELETE_TABLE_COLUMN_STAT, new DeleteTableColumnStatEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], - tableName, colName, this)); + tableName, colName, engine, this)); } } committed = getMS().commitTransaction(); @@ -6583,7 +6584,7 @@ private int get_num_partitions_by_expr(final String catName, final String dbName @Override public List get_partitions_by_names(final String dbName, final String tblName, final List partNames) throws TException { - return get_partitions_by_names(dbName, tblName, partNames, false); + return get_partitions_by_names(dbName, tblName, partNames, false, null); } @Override @@ -6591,19 +6592,19 @@ public GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNam throws TException { List partitions = get_partitions_by_names(gpbnr.getDb_name(), gpbnr.getTbl_name(), gpbnr.getNames(), - gpbnr.isSetGet_col_stats() && gpbnr.isGet_col_stats(), gpbnr.getProcessorCapabilities(), - gpbnr.getProcessorIdentifier()); + gpbnr.isSetGet_col_stats() && gpbnr.isGet_col_stats(), gpbnr.getEngine(), + gpbnr.getProcessorCapabilities(), gpbnr.getProcessorIdentifier()); return new GetPartitionsByNamesResult(partitions); } public List get_partitions_by_names(final String dbName, final String tblName, - final List partNames, boolean getColStats) throws TException { - return get_partitions_by_names(dbName, tblName, partNames, getColStats, null, null); + final List partNames, boolean getColStats, String engine) throws TException { + return get_partitions_by_names(dbName, tblName, partNames, getColStats, engine, null, null); } public List get_partitions_by_names(final String dbName, final String tblName, - final List partNames, boolean getColStats, List processorCapabilities, - String processorId) throws TException { + final List partNames, boolean getColStats, String engine, + List processorCapabilities, String processorId) throws TException { String[] dbNameParts = parseDbName(dbName, conf); String parsedCatName = dbNameParts[CAT_NAME]; @@ -6632,7 +6633,8 @@ public GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNam List partColStatsList = getMS().getPartitionColumnStatistics(parsedCatName, parsedDbName, tblName, Collections.singletonList(partName), - StatsSetupConst.getColumnsHavingStats(part.getParameters())); + StatsSetupConst.getColumnsHavingStats(part.getParameters()), + engine); if (partColStatsList != null && !partColStatsList.isEmpty()) { ColumnStatistics partColStats = partColStatsList.get(0); if (partColStats != null) { @@ -8120,7 +8122,7 @@ public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) throws TExce try { aggrStats = getMS().get_aggr_stats_for(catName, dbName, tblName, - lowerCasePartNames, lowerCaseColNames, request.getValidWriteIdList()); + lowerCasePartNames, lowerCaseColNames, request.getEngine(), request.getValidWriteIdList()); return aggrStats; } finally { endFunction("get_aggr_stats_for", aggrStats == null, null, request.getTblName()); @@ -8198,7 +8200,7 @@ private boolean updatePartColumnStatsWithMerge(String catName, String dbName, St List partitionNames = new ArrayList<>(); partitionNames.addAll(newStatsMap.keySet()); List csOlds = ms.getPartitionColumnStatistics(catName, dbName, tableName, - partitionNames, colNames, request.getValidWriteIdList()); + partitionNames, colNames, request.getEngine(), request.getValidWriteIdList()); if 
(newStatsMap.values().size() != csOlds.size()) { // some of the partitions miss stats. LOG.debug("Some of the partitions miss stats."); @@ -8274,7 +8276,7 @@ private boolean updateTableColumnStatsWithMerge(String catName, String dbName, S boolean isCommitted = false, result = false; try { ColumnStatistics csOld = ms.getTableColumnStatistics(catName, dbName, tableName, colNames, - request.getValidWriteIdList()); + request.getEngine(), request.getValidWriteIdList()); // we first use the valid stats list to prune the stats boolean isInvalidTxnStats = csOld != null && csOld.isSetIsStatsCompliant() && !csOld.isIsStatsCompliant(); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index acb4646169..54fe1eb608 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -1231,28 +1231,31 @@ public void visit(LeafNode node) throws MetaException { * @param dbName the database name of the table * @param tableName the table name * @param colNames the list of the column names + * @param engine engine making the request * @return the column statistics for the specified columns * @throws MetaException */ public ColumnStatistics getTableStats(final String catName, final String dbName, - final String tableName, List colNames, - boolean enableBitVector) throws MetaException { + final String tableName, List colNames, String engine, + boolean enableBitVector) throws MetaException { if (colNames == null || colNames.isEmpty()) { return null; } final boolean doTrace = LOG.isDebugEnabled(); final String queryText0 = "select " + getStatsList(enableBitVector) + " from " + TAB_COL_STATS - + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? and \"COLUMN_NAME\" in ("; + + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? " + + " and \"ENGINE\" = ? and \"COLUMN_NAME\" in ("; Batchable b = new Batchable() { @Override public List run(List input) throws MetaException { String queryText = queryText0 + makeParams(input.size()) + ")"; - Object[] params = new Object[input.size() + 3]; + Object[] params = new Object[input.size() + 4]; params[0] = catName; params[1] = dbName; params[2] = tableName; + params[3] = engine; for (int i = 0; i < input.size(); ++i) { - params[i + 3] = input.get(i); + params[i + 4] = input.get(i); } long start = doTrace ? 
System.nanoTime() : 0; Query query = pm.newQuery("javax.jdo.query.SQL", queryText); @@ -1272,14 +1275,15 @@ public ColumnStatistics getTableStats(final String catName, final String dbName, } ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tableName); csd.setCatName(catName); - ColumnStatistics result = makeColumnStats(list, csd, 0); + ColumnStatistics result = makeColumnStats(list, csd, 0, engine); b.closeAllQueries(); return result; } public AggrStats aggrColStatsForPartitions(String catName, String dbName, String tableName, - List partNames, List colNames, boolean useDensityFunctionForNDVEstimation, - double ndvTuner, boolean enableBitVector) throws MetaException { + List partNames, List colNames, String engine, + boolean useDensityFunctionForNDVEstimation, double ndvTuner, boolean enableBitVector) + throws MetaException { if (colNames.isEmpty() || partNames.isEmpty()) { LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval"); return new AggrStats(Collections.emptyList(), 0); // Nothing to aggregate @@ -1305,14 +1309,14 @@ public AggrStats aggrColStatsForPartitions(String catName, String dbName, String partsFound = colStatsAggrCached.getNumPartsCached(); } else { if (computePartsFound) { - partsFound = partsFoundForPartitions(catName, dbName, tableName, partNames, colNames); + partsFound = partsFoundForPartitions(catName, dbName, tableName, partNames, colNames, engine); computePartsFound = false; } List colNamesForDB = new ArrayList<>(); colNamesForDB.add(colName); // Read aggregated stats for one column colStatsAggrFromDB = - columnStatisticsObjForPartitions(catName, dbName, tableName, partNames, colNamesForDB, + columnStatisticsObjForPartitions(catName, dbName, tableName, partNames, colNamesForDB, engine, partsFound, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector); if (!colStatsAggrFromDB.isEmpty()) { ColumnStatisticsObj colStatsAggr = colStatsAggrFromDB.get(0); @@ -1323,9 +1327,9 @@ public AggrStats aggrColStatsForPartitions(String catName, String dbName, String } } } else { - partsFound = partsFoundForPartitions(catName, dbName, tableName, partNames, colNames); + partsFound = partsFoundForPartitions(catName, dbName, tableName, partNames, colNames, engine); colStatsList = - columnStatisticsObjForPartitions(catName, dbName, tableName, partNames, colNames, partsFound, + columnStatisticsObjForPartitions(catName, dbName, tableName, partNames, colNames, engine, partsFound, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector); } LOG.info("useDensityFunctionForNDVEstimation = " + useDensityFunctionForNDVEstimation @@ -1345,12 +1349,13 @@ private BloomFilter createPartsBloomFilter(int maxPartsPerCacheNode, double fpp, private long partsFoundForPartitions( final String catName, final String dbName, final String tableName, - final List partNames, List colNames) throws MetaException { + final List partNames, List colNames, String engine) throws MetaException { assert !colNames.isEmpty() && !partNames.isEmpty(); final boolean doTrace = LOG.isDebugEnabled(); final String queryText0 = "select count(\"COLUMN_NAME\") from " + PART_COL_STATS + "" + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? " + " and \"COLUMN_NAME\" in (%1$s) and \"PARTITION_NAME\" in (%2$s)" + + " and \"ENGINE\" = ? 
" + " group by \"PARTITION_NAME\""; List allCounts = Batchable.runBatched(batchSize, colNames, new Batchable() { @Override @@ -1365,7 +1370,7 @@ private long partsFoundForPartitions( Query query = pm.newQuery("javax.jdo.query.SQL", queryText); try { Object qResult = executeWithArray(query, prepareParams( - catName, dbName, tableName, inputPartNames, inputColName), queryText); + catName, dbName, tableName, inputPartNames, inputColName, engine), queryText); long end = doTrace ? System.nanoTime() : 0; MetastoreDirectSqlUtils.timingTrace(doTrace, queryText, start, end); ForwardQueryResult fqr = (ForwardQueryResult) qResult; @@ -1391,9 +1396,9 @@ private long partsFoundForPartitions( } private List columnStatisticsObjForPartitions( - final String catName, final String dbName, - final String tableName, final List partNames, List colNames, long partsFound, - final boolean useDensityFunctionForNDVEstimation, final double ndvTuner, final boolean enableBitVector) throws MetaException { + final String catName, final String dbName, final String tableName, final List partNames, + List colNames, String engine, long partsFound, final boolean useDensityFunctionForNDVEstimation, + final double ndvTuner, final boolean enableBitVector) throws MetaException { final boolean areAllPartsFound = (partsFound == partNames.size()); return Batchable.runBatched(batchSize, colNames, new Batchable() { @Override @@ -1402,7 +1407,7 @@ private long partsFoundForPartitions( @Override public List run(List inputPartNames) throws MetaException { return columnStatisticsObjForPartitionsBatch(catName, dbName, tableName, inputPartNames, - inputColNames, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector); + inputColNames, engine, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector); } }); } @@ -1445,32 +1450,32 @@ private long partsFoundForPartitions( /** Should be called with the list short enough to not trip up Oracle/etc. */ private List columnStatisticsObjForPartitionsBatch(String catName, String dbName, - String tableName, List partNames, List colNames, boolean areAllPartsFound, - boolean useDensityFunctionForNDVEstimation, double ndvTuner, boolean enableBitVector) + String tableName, List partNames, List colNames, String engine, + boolean areAllPartsFound, boolean useDensityFunctionForNDVEstimation, double ndvTuner, boolean enableBitVector) throws MetaException { if (enableBitVector) { - return aggrStatsUseJava(catName, dbName, tableName, partNames, colNames, areAllPartsFound, + return aggrStatsUseJava(catName, dbName, tableName, partNames, colNames, engine, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner); } else { - return aggrStatsUseDB(catName, dbName, tableName, partNames, colNames, areAllPartsFound, + return aggrStatsUseDB(catName, dbName, tableName, partNames, colNames, engine, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner); } } private List aggrStatsUseJava(String catName, String dbName, String tableName, - List partNames, List colNames, boolean areAllPartsFound, + List partNames, List colNames, String engine, boolean areAllPartsFound, boolean useDensityFunctionForNDVEstimation, double ndvTuner) throws MetaException { // 1. get all the stats for colNames in partNames; List partStats = - getPartitionStats(catName, dbName, tableName, partNames, colNames, true); + getPartitionStats(catName, dbName, tableName, partNames, colNames, engine, true); // 2. 
use util function to aggr stats return MetaStoreServerUtils.aggrPartitionStats(partStats, catName, dbName, tableName, partNames, colNames, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner); } private List aggrStatsUseDB(String catName, String dbName, - String tableName, List partNames, List colNames, boolean areAllPartsFound, - boolean useDensityFunctionForNDVEstimation, double ndvTuner) throws MetaException { + String tableName, List partNames, List colNames, String engine, + boolean areAllPartsFound, boolean useDensityFunctionForNDVEstimation, double ndvTuner) throws MetaException { // TODO: all the extrapolation logic should be moved out of this class, // only mechanical data retrieval should remain here. String commonPrefix = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", " @@ -1508,10 +1513,11 @@ private long partsFoundForPartitions( if (areAllPartsFound) { queryText = commonPrefix + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")" + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + + " and \"ENGINE\" = ? " + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\""; start = doTrace ? System.nanoTime() : 0; query = pm.newQuery("javax.jdo.query.SQL", queryText); - qResult = executeWithArray(query, prepareParams(catName, dbName, tableName, partNames, colNames), + qResult = executeWithArray(query, prepareParams(catName, dbName, tableName, partNames, colNames, engine), queryText); if (qResult == null) { query.closeAll(); @@ -1537,10 +1543,11 @@ private long partsFoundForPartitions( + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? " + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")" + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + + " and \"ENGINE\" = ? " + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\""; start = doTrace ? System.nanoTime() : 0; query = pm.newQuery("javax.jdo.query.SQL", queryText); - qResult = executeWithArray(query, prepareParams(catName, dbName, tableName, partNames, colNames), + qResult = executeWithArray(query, prepareParams(catName, dbName, tableName, partNames, colNames, engine), queryText); end = doTrace ? System.nanoTime() : 0; MetastoreDirectSqlUtils.timingTrace(doTrace, queryText, start, end); @@ -1571,11 +1578,13 @@ private long partsFoundForPartitions( if (noExtraColumnNames.size() != 0) { queryText = commonPrefix + " and \"COLUMN_NAME\" in (" + makeParams(noExtraColumnNames.size()) + ")" + " and \"PARTITION_NAME\" in (" - + makeParams(partNames.size()) + ")" + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\""; + + makeParams(partNames.size()) + ")" + + " and \"ENGINE\" = ? " + + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\""; start = doTrace ? System.nanoTime() : 0; query = pm.newQuery("javax.jdo.query.SQL", queryText); qResult = executeWithArray(query, - prepareParams(catName, dbName, tableName, partNames, noExtraColumnNames), queryText); + prepareParams(catName, dbName, tableName, partNames, noExtraColumnNames, engine), queryText); if (qResult == null) { query.closeAll(); return Collections.emptyList(); @@ -1600,15 +1609,16 @@ private long partsFoundForPartitions( Map> sumMap = new HashMap>(); queryText = "select \"COLUMN_NAME\", sum(\"NUM_NULLS\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\"), sum(\"NUM_DISTINCTS\")" + " from " + PART_COL_STATS + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? 
" - + " and \"COLUMN_NAME\" in (" + makeParams(extraColumnNameTypeParts.size()) - + ") and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) - + ") group by \"COLUMN_NAME\""; + + " and \"COLUMN_NAME\" in (" + makeParams(extraColumnNameTypeParts.size()) + ")" + + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + + " and \"ENGINE\" = ? " + + " group by \"COLUMN_NAME\""; start = doTrace ? System.nanoTime() : 0; query = pm.newQuery("javax.jdo.query.SQL", queryText); List extraColumnNames = new ArrayList(); extraColumnNames.addAll(extraColumnNameTypeParts.keySet()); qResult = executeWithArray(query, - prepareParams(catName, dbName, tableName, partNames, extraColumnNames), queryText); + prepareParams(catName, dbName, tableName, partNames, extraColumnNames, engine), queryText); if (qResult == null) { query.closeAll(); return Collections.emptyList(); @@ -1674,18 +1684,20 @@ private long partsFoundForPartitions( + "\",\"PARTITION_NAME\" from " + PART_COL_STATS + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + " and \"COLUMN_NAME\" = ?" + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + + " and \"ENGINE\" = ? " + " order by \"" + colStatName + "\""; } else { queryText = "select \"" + colStatName + "\",\"PARTITION_NAME\" from " + PART_COL_STATS + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + " and \"COLUMN_NAME\" = ?" + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + + " and \"ENGINE\" = ? " + " order by cast(\"" + colStatName + "\" as decimal)"; } start = doTrace ? System.nanoTime() : 0; query = pm.newQuery("javax.jdo.query.SQL", queryText); qResult = executeWithArray(query, - prepareParams(catName, dbName, tableName, partNames, Arrays.asList(colName)), queryText); + prepareParams(catName, dbName, tableName, partNames, Arrays.asList(colName), engine), queryText); if (qResult == null) { query.closeAll(); return Collections.emptyList(); @@ -1710,11 +1722,13 @@ private long partsFoundForPartitions( + "avg((cast(\"BIG_DECIMAL_HIGH_VALUE\" as decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\")" + " from " + PART_COL_STATS + "" + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + " and \"COLUMN_NAME\" = ?" + " and \"PARTITION_NAME\" in (" - + makeParams(partNames.size()) + ")" + " group by \"COLUMN_NAME\""; + + makeParams(partNames.size()) + ")" + + " and \"ENGINE\" = ? " + + " group by \"COLUMN_NAME\""; start = doTrace ? 
System.nanoTime() : 0; query = pm.newQuery("javax.jdo.query.SQL", queryText); qResult = executeWithArray(query, - prepareParams(catName, dbName, tableName, partNames, Arrays.asList(colName)), queryText); + prepareParams(catName, dbName, tableName, partNames, Arrays.asList(colName), engine), queryText); if (qResult == null) { query.closeAll(); return Collections.emptyList(); @@ -1760,9 +1774,8 @@ private ColumnStatisticsObj prepareCSObjWithAdjustedNDV(Object[] row, int i, } private Object[] prepareParams(String catName, String dbName, String tableName, - List partNames, List colNames) throws MetaException { - - Object[] params = new Object[colNames.size() + partNames.size() + 3]; + List partNames, List colNames, String engine) throws MetaException { + Object[] params = new Object[colNames.size() + partNames.size() + 4]; int paramI = 0; params[paramI++] = catName; params[paramI++] = dbName; @@ -1773,13 +1786,14 @@ private ColumnStatisticsObj prepareCSObjWithAdjustedNDV(Object[] row, int i, for (String partName : partNames) { params[paramI++] = partName; } + params[paramI++] = engine; return params; } public List getPartitionStats( final String catName, final String dbName, final String tableName, final List partNames, - List colNames, boolean enableBitVector) throws MetaException { + List colNames, String engine, boolean enableBitVector) throws MetaException { if (colNames.isEmpty() || partNames.isEmpty()) { return Collections.emptyList(); } @@ -1787,7 +1801,9 @@ private ColumnStatisticsObj prepareCSObjWithAdjustedNDV(Object[] row, int i, final String queryText0 = "select \"PARTITION_NAME\", " + getStatsList(enableBitVector) + " from " + " " + PART_COL_STATS + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? and " + "\"COLUMN_NAME\"" - + " in (%1$s) AND \"PARTITION_NAME\" in (%2$s) order by \"PARTITION_NAME\""; + + " in (%1$s) AND \"PARTITION_NAME\" in (%2$s) " + + " and \"ENGINE\" = ? " + + " order by \"PARTITION_NAME\""; Batchable b = new Batchable() { @Override public List run(final List inputColNames) throws MetaException { @@ -1799,7 +1815,7 @@ private ColumnStatisticsObj prepareCSObjWithAdjustedNDV(Object[] row, int i, long start = doTrace ? System.nanoTime() : 0; Query query = pm.newQuery("javax.jdo.query.SQL", queryText); Object qResult = executeWithArray(query, prepareParams( - catName, dbName, tableName, inputPartNames, inputColNames), queryText); + catName, dbName, tableName, inputPartNames, inputColNames, engine), queryText); MetastoreDirectSqlUtils.timingTrace(doTrace, queryText0, start, (doTrace ? 
System.nanoTime() : 0)); if (qResult == null) { query.closeAll(); @@ -1831,7 +1847,7 @@ private ColumnStatisticsObj prepareCSObjWithAdjustedNDV(Object[] row, int i, ColumnStatisticsDesc csd = new ColumnStatisticsDesc(false, dbName, tableName); csd.setCatName(catName); csd.setPartName(lastPartName); - result.add(makeColumnStats(list.subList(from, i), csd, 1)); + result.add(makeColumnStats(list.subList(from, i), csd, 1, engine)); } lastPartName = partName; from = i; @@ -1851,7 +1867,7 @@ private final String getStatsList(boolean enableBitVector) { } private ColumnStatistics makeColumnStats( - List list, ColumnStatisticsDesc csd, int offset) throws MetaException { + List list, ColumnStatisticsDesc csd, int offset, String engine) throws MetaException { ColumnStatistics result = new ColumnStatistics(); result.setStatsDesc(csd); List csos = new ArrayList(list.size()); @@ -1867,6 +1883,7 @@ private ColumnStatistics makeColumnStats( Deadline.checkTimeout(); } result.setStatsObj(csos); + result.setEngine(engine); return result; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 509fcb25ad..304b80c88e 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -1133,7 +1133,7 @@ public boolean dropTable(String catName, String dbName, String tableName) } // delete column statistics if present try { - deleteTableColumnStatistics(catName, dbName, tableName, null); + deleteTableColumnStatistics(catName, dbName, tableName, null, null); } catch (NoSuchObjectException e) { LOG.info("Found no table level column statistics associated with {} to delete", TableName.getQualified(catName, dbName, tableName)); @@ -2627,7 +2627,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio // delete partition level column stats if it exists try { - deletePartitionColumnStatistics(catName, dbName, tableName, partName, part.getValues(), null); + deletePartitionColumnStatistics(catName, dbName, tableName, partName, part.getValues(), null, null); } catch (NoSuchObjectException e) { LOG.info("No column statistics records found to delete"); } @@ -8497,12 +8497,12 @@ private void writeMPartitionColumnStatistics(Table table, Partition partition, * @throws MetaException */ private Map getPartitionColStats(Table table, - List colNames) throws NoSuchObjectException, MetaException { + List colNames, String engine) throws NoSuchObjectException, MetaException { Map statsMap = Maps.newHashMap(); QueryWrapper queryWrapper = new QueryWrapper(); try { List stats = getMTableColumnStatistics(table, - colNames, queryWrapper); + colNames, engine, queryWrapper); for(MTableColumnStatistics cStat : stats) { statsMap.put(cStat.getColName(), cStat); } @@ -8531,13 +8531,13 @@ private void writeMPartitionColumnStatistics(Table table, Partition partition, for (ColumnStatisticsObj statsObj : statsObjs) { colNames.add(statsObj.getColName()); } - Map oldStats = getPartitionColStats(table, colNames); + Map oldStats = getPartitionColStats(table, colNames, colStats.getEngine()); for (ColumnStatisticsObj statsObj:statsObjs) { // We have to get mtable again because DataNucleus. 
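Note the null engine passed by dropTable and dropPartitionCommon above: a null engine makes the store-level delete skip the engine predicate, so dropping a table or partition removes the statistics written by every engine, not just Hive's. A hedged sketch of that contract against the RawStore signatures declared in this patch (catalog, database, table, and column names are illustrative):

import org.apache.hadoop.hive.metastore.RawStore;

public class DeleteScopeSketch {
  static void contrastScopes(RawStore store) throws Exception {
    // colName == null -> all columns; engine == null -> all engines.
    // This is the wide form used by dropTable in this patch.
    store.deleteTableColumnStatistics("hive", "default", "sample_tbl", null, null);

    // Engine-scoped form: removes only the named engine's rows for one column.
    store.deleteTableColumnStatistics("hive", "default", "sample_tbl", "id", "hive");
  }
}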
MTableColumnStatistics mStatsObj = StatObjectConverter.convertToMTableColumnStatistics( ensureGetMTable(catName, statsDesc.getDbName(), statsDesc.getTableName()), statsDesc, - statsObj); + statsObj, colStats.getEngine()); writeMTableColumnStatistics(table, mStatsObj, oldStats.get(statsObj.getColName())); // There is no need to add colname again, otherwise we will get duplicate colNames. } @@ -8592,12 +8592,12 @@ private void writeMPartitionColumnStatistics(Table table, Partition partition, * @throws MetaException */ private Map getPartitionColStats(Table table, - String partitionName, List colNames) throws NoSuchObjectException, MetaException { + String partitionName, List colNames, String engine) throws NoSuchObjectException, MetaException { Map statsMap = Maps.newHashMap(); QueryWrapper queryWrapper = new QueryWrapper(); try { List stats = getMPartitionColumnStatistics(table, - Lists.newArrayList(partitionName), colNames, queryWrapper); + Lists.newArrayList(partitionName), colNames, engine, queryWrapper); for(MPartitionColumnStatistics cStat : stats) { statsMap.put(cStat.getColName(), cStat); } @@ -8628,7 +8628,7 @@ private void writeMPartitionColumnStatistics(Table table, Partition partition, } Map oldStats = getPartitionColStats(table, statsDesc - .getPartName(), colNames); + .getPartName(), colNames, colStats.getEngine()); MPartition mPartition = getMPartition( catName, statsDesc.getDbName(), statsDesc.getTableName(), partVals); @@ -8638,7 +8638,7 @@ private void writeMPartitionColumnStatistics(Table table, Partition partition, for (ColumnStatisticsObj statsObj : statsObjs) { MPartitionColumnStatistics mStatsObj = - StatObjectConverter.convertToMPartitionColumnStatistics(mPartition, statsDesc, statsObj); + StatObjectConverter.convertToMPartitionColumnStatistics(mPartition, statsDesc, statsObj, colStats.getEngine()); writeMPartitionColumnStatistics(table, partition, mStatsObj, oldStats.get(statsObj.getColName())); } @@ -8677,7 +8677,7 @@ private void writeMPartitionColumnStatistics(Table table, Partition partition, } } - private List getMTableColumnStatistics(Table table, List colNames, QueryWrapper queryWrapper) + private List getMTableColumnStatistics(Table table, List colNames, String engine, QueryWrapper queryWrapper) throws MetaException { if (colNames == null || colNames.isEmpty()) { return Collections.emptyList(); @@ -8694,16 +8694,17 @@ private void writeMPartitionColumnStatistics(Table table, Partition partition, @Override public List run(List input) throws MetaException { - String filter = "tableName == t1 && dbName == t2 && catName == t3 && ("; - String paramStr = "java.lang.String t1, java.lang.String t2, java.lang.String t3"; - Object[] params = new Object[input.size() + 3]; + String filter = "tableName == t1 && dbName == t2 && catName == t3 && engine == t4 && ("; + String paramStr = "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4"; + Object[] params = new Object[input.size() + 4]; params[0] = table.getTableName(); params[1] = table.getDbName(); params[2] = table.getCatName(); + params[3] = engine; for (int i = 0; i < input.size(); ++i) { filter += ((i == 0) ? 
"" : " || ") + "colName == c" + i; paramStr += ", java.lang.String c" + i; - params[i + 3] = input.get(i); + params[i + 4] = input.get(i); } filter += ")"; query.setFilter(filter); @@ -8752,14 +8753,51 @@ public void validateTableCols(Table table, List colNames) throws MetaExc } @Override - public ColumnStatistics getTableColumnStatistics( + public List getTableColumnStatistics( String catName, String dbName, String tableName, List colNames) throws MetaException, NoSuchObjectException { // Note: this will get stats without verifying ACID. + boolean committed = false; + Query query = null; + List result = new ArrayList<>(); + + try { + openTransaction(); + query = pm.newQuery(MTableColumnStatistics.class); + query.setResult("engine"); + query.setUnique(true); + Collection names = (Collection) query.execute(); + List engines = new ArrayList<>(); + for (Iterator i = names.iterator(); i.hasNext();) { + engines.add((String) i.next()); + } + for (String e : engines) { + ColumnStatistics cs = getTableColumnStatisticsInternal( + catName, dbName, tableName, colNames, e, true, true); + if (cs != null) { + result.add(cs); + } + } + committed = commitTransaction(); + return result; + } finally { + LOG.debug("Done executing getTableColumnStatistics with status : {}", committed); + rollbackAndCleanup(committed, query); + } + } + + @Override + public ColumnStatistics getTableColumnStatistics( + String catName, + String dbName, + String tableName, + List colNames, + String engine) throws MetaException, NoSuchObjectException { + // Note: this will get stats without verifying ACID. return getTableColumnStatisticsInternal( - catName, dbName, tableName, colNames, true, true); + catName, dbName, tableName, colNames, engine, true, true); } @Override @@ -8768,6 +8806,7 @@ public ColumnStatistics getTableColumnStatistics( String dbName, String tableName, List colNames, + String engine, String writeIdList) throws MetaException, NoSuchObjectException { // If the current stats in the metastore doesn't comply with // the isolation level of the query, set No to the compliance flag. 
@@ -8778,7 +8817,7 @@ public ColumnStatistics getTableColumnStatistics( || (areTxnStatsSupported && isCurrentStatsValidForTheQuery(table, writeIdList, false)); } ColumnStatistics stats = getTableColumnStatisticsInternal( - catName, dbName, tableName, colNames, true, true); + catName, dbName, tableName, colNames, engine, true, true); if (stats != null && isCompliant != null) { stats.setIsStatsCompliant(isCompliant); } @@ -8786,14 +8825,14 @@ public ColumnStatistics getTableColumnStatistics( } protected ColumnStatistics getTableColumnStatisticsInternal( - String catName, String dbName, String tableName, final List colNames, boolean allowSql, - boolean allowJdo) throws MetaException, NoSuchObjectException { + String catName, String dbName, String tableName, final List colNames, String engine, + boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); return new GetStatHelper(normalizeIdentifier(catName), normalizeIdentifier(dbName), normalizeIdentifier(tableName), allowSql, allowJdo, null) { @Override protected ColumnStatistics getSqlResult(GetHelper ctx) throws MetaException { - return directSql.getTableStats(catName, dbName, tblName, colNames, enableBitVector); + return directSql.getTableStats(catName, dbName, tblName, colNames, engine, enableBitVector); } @Override protected ColumnStatistics getJdoResult( @@ -8802,7 +8841,7 @@ protected ColumnStatistics getJdoResult( try { List mStats = - getMTableColumnStatistics(getTable(), colNames, queryWrapper); + getMTableColumnStatistics(getTable(), colNames, engine, queryWrapper); if (mStats.isEmpty()) { return null; } @@ -8817,7 +8856,7 @@ protected ColumnStatistics getJdoResult( statObjs.add(StatObjectConverter.getTableColumnStatisticsObj(mStat, enableBitVector)); Deadline.checkTimeout(); } - return new ColumnStatistics(desc, statObjs); + return new ColumnStatistics(desc, statObjs, engine); } finally { queryWrapper.close(); } @@ -8826,24 +8865,57 @@ protected ColumnStatistics getJdoResult( } @Override - public List getPartitionColumnStatistics(String catName, String dbName, String tableName, + public List> getPartitionColumnStatistics(String catName, String dbName, String tableName, List partNames, List colNames) throws MetaException, NoSuchObjectException { // Note: this will get stats without verifying ACID. + boolean committed = false; + Query query = null; + List> result = new ArrayList<>(); + + try { + openTransaction(); + query = pm.newQuery(MTableColumnStatistics.class); + query.setResult("engine"); + query.setUnique(true); + Collection names = (Collection) query.execute(); + List engines = new ArrayList<>(); + for (Iterator i = names.iterator(); i.hasNext();) { + engines.add((String) i.next()); + } + for (String e : engines) { + List cs = getPartitionColumnStatisticsInternal( + catName, dbName, tableName, partNames, colNames, e, true, true); + if (cs != null) { + result.add(cs); + } + } + committed = commitTransaction(); + return result; + } finally { + LOG.debug("Done executing getTableColumnStatistics with status : {}", committed); + rollbackAndCleanup(committed, query); + } + } + + @Override + public List getPartitionColumnStatistics(String catName, String dbName, String tableName, + List partNames, List colNames, String engine) throws MetaException, NoSuchObjectException { + // Note: this will get stats without verifying ACID. 
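The engine-less getTableColumnStatistics above first collects the distinct engine values present in MTableColumnStatistics, then fetches one ColumnStatistics per engine. A hedged sketch of what a caller sees under the signatures declared in this patch (identifiers in the sketch are illustrative):

import java.util.List;

import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;

public class PerEngineTableStatsSketch {
  static void printPerEngineStats(RawStore store, String cat, String db,
      String tbl, List<String> cols) throws Exception {
    // One entry per engine that has written stats for these columns.
    List<ColumnStatistics> perEngine = store.getTableColumnStatistics(cat, db, tbl, cols);
    for (ColumnStatistics cs : perEngine) {
      System.out.println(cs.getEngine() + " -> " + cs.getStatsObjSize() + " column stat(s)");
    }
  }
}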
return getPartitionColumnStatisticsInternal( - catName, dbName, tableName, partNames, colNames, true, true); + catName, dbName, tableName, partNames, colNames, engine, true, true); } @Override public List getPartitionColumnStatistics( String catName, String dbName, String tableName, List partNames, List colNames, - String writeIdList) + String engine, String writeIdList) throws MetaException, NoSuchObjectException { if (partNames == null || partNames.isEmpty()) { return null; } List allStats = getPartitionColumnStatisticsInternal( - catName, dbName, tableName, partNames, colNames, true, true); + catName, dbName, tableName, partNames, colNames, engine, true, true); if (writeIdList != null) { if (!areTxnStatsSupported) { for (ColumnStatistics cs : allStats) { @@ -8873,13 +8945,13 @@ protected ColumnStatistics getJdoResult( protected List getPartitionColumnStatisticsInternal( String catName, String dbName, String tableName, final List partNames, final List colNames, - boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { + String engine, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); return new GetListHelper(catName, dbName, tableName, allowSql, allowJdo) { @Override protected List getSqlResult( GetHelper> ctx) throws MetaException { - return directSql.getPartitionStats(catName, dbName, tblName, partNames, colNames, enableBitVector); + return directSql.getPartitionStats(catName, dbName, tblName, partNames, colNames, engine, enableBitVector); } @Override protected List getJdoResult( @@ -8887,7 +8959,7 @@ protected ColumnStatistics getJdoResult( QueryWrapper queryWrapper = new QueryWrapper(); try { List mStats = - getMPartitionColumnStatistics(getTable(), partNames, colNames, queryWrapper); + getMPartitionColumnStatistics(getTable(), partNames, colNames, engine, queryWrapper); List result = new ArrayList<>( Math.min(mStats.size(), partNames.size())); String lastPartName = null; @@ -8899,7 +8971,7 @@ protected ColumnStatistics getJdoResult( String partName = isLast ? null : mStatsObj.getPartitionName(); if (isLast || !partName.equals(lastPartName)) { if (i != 0) { - result.add(new ColumnStatistics(csd, curList)); + result.add(new ColumnStatistics(csd, curList, engine)); } if (isLast) { continue; @@ -8922,7 +8994,7 @@ protected ColumnStatistics getJdoResult( @Override public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, final List partNames, final List colNames, - String writeIdList) throws MetaException, NoSuchObjectException { + String engine, String writeIdList) throws MetaException, NoSuchObjectException { // If the current stats in the metastore doesn't comply with // the isolation level of the query, return null. 
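Aggregation is engine-scoped as well: only PART_COL_STATS rows whose ENGINE column matches contribute to the aggregate. A hedged caller-side sketch using the get_aggr_stats_for signature declared in this patch, with a null writeIdList for the non-transactional case (names are illustrative; "hive" doubles as the default catalog name and the engine literal):

import java.util.Arrays;

import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.AggrStats;

public class AggrStatsPerEngineSketch {
  static void aggregateForHive(RawStore store) throws Exception {
    String catName = "hive"; // default catalog
    String engine = "hive";  // engine whose stats should be aggregated
    AggrStats agg = store.get_aggr_stats_for(catName, "default", "sample_tbl",
        Arrays.asList("ds=2020-01-01", "ds=2020-01-02"), Arrays.asList("id"),
        engine, null /* writeIdList: non-transactional read */);
    // Stats rows written by other engines do not count toward partsFound.
    System.out.println("partsFound=" + agg.getPartsFound());
  }
}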
if (writeIdList != null) { @@ -8955,12 +9027,12 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblNam } } } - return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); + return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, engine); } @Override public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, - final List partNames, final List colNames) + final List partNames, final List colNames, String engine) throws MetaException, NoSuchObjectException { final boolean useDensityFunctionForNDVEstimation = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_NDV_DENSITY_FUNCTION); @@ -8971,7 +9043,7 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblNam protected AggrStats getSqlResult(GetHelper ctx) throws MetaException { return directSql.aggrColStatsForPartitions(catName, dbName, tblName, partNames, - colNames, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector); + colNames, engine, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector); } @Override protected AggrStats getJdoResult(GetHelper ctx) @@ -9024,7 +9096,7 @@ public void flushCache() { } private List getMPartitionColumnStatistics( - Table table, List partNames, List colNames, QueryWrapper queryWrapper) + Table table, List partNames, List colNames, String engine, QueryWrapper queryWrapper) throws NoSuchObjectException, MetaException { boolean committed = false; @@ -9038,13 +9110,14 @@ public void flushCache() { LOG.warn("The table does not have the same column definition as its partition."); } Query query = queryWrapper.query = pm.newQuery(MPartitionColumnStatistics.class); - String paramStr = "java.lang.String t1, java.lang.String t2, java.lang.String t3"; - String filter = "tableName == t1 && dbName == t2 && catName == t3 && ("; - Object[] params = new Object[colNames.size() + partNames.size() + 3]; + String paramStr = "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4"; + String filter = "tableName == t1 && dbName == t2 && catName == t3 && engine == t4 && ("; + Object[] params = new Object[colNames.size() + partNames.size() + 4]; int i = 0; params[i++] = table.getTableName(); params[i++] = table.getDbName(); params[i++] = table.isSetCatName() ? table.getCatName() : getDefaultCatalog(conf); + params[i++] = engine; int firstI = i; for (String s : partNames) { filter += ((i == firstI) ? "" : " || ") + "partitionName == p" + i; @@ -9092,8 +9165,7 @@ private void dropPartitionColumnStatisticsNoTxn( @Override public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, - String partName, List partVals, - String colName) + String partName, List partVals, String colName, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { boolean ret = false; Query query = null; @@ -9125,24 +9197,34 @@ public boolean deletePartitionColumnStatistics(String catName, String dbName, St if (colName != null) { filter = "partition.partitionName == t1 && dbName == t2 && tableName == t3 && " - + "colName == t4 && catName == t5"; + + "colName == t4 && catName == t5" + (engine != null ? " && engine == t6" : ""); parameters = "java.lang.String t1, java.lang.String t2, " - + "java.lang.String t3, java.lang.String t4, java.lang.String t5"; + + "java.lang.String t3, java.lang.String t4, java.lang.String t5" + (engine != null ? 
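The delete path above builds its JDOQL filter conditionally, appending the engine clause only when an engine was supplied; that single branch is what produces the null-engine "all engines" behavior. A hedged standalone sketch of the idiom on a toy filter string (variable names mirror the t1..t6 placeholders in the hunk):

public class ConditionalEngineFilterSketch {
  // Mirrors deletePartitionColumnStatistics: constrain engine only when given.
  static String partitionStatsFilter(boolean hasColName, boolean hasEngine) {
    String filter = hasColName
        ? "partition.partitionName == t1 && dbName == t2 && tableName == t3 && colName == t4 && catName == t5"
        : "partition.partitionName == t1 && dbName == t2 && tableName == t3 && catName == t4";
    if (hasEngine) {
      filter += hasColName ? " && engine == t6" : " && engine == t5";
    }
    return filter;
  }

  public static void main(String[] args) {
    System.out.println(partitionStatsFilter(true, false)); // engine-agnostic delete
    System.out.println(partitionStatsFilter(true, true));  // engine-scoped delete
  }
}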
", java.lang.String t6" : ""); } else { - filter = "partition.partitionName == t1 && dbName == t2 && tableName == t3 && catName == t4"; - parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4"; + filter = "partition.partitionName == t1 && dbName == t2 && tableName == t3 && catName == t4" + (engine != null ? " && engine == t5" : ""); + parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4" + (engine != null ? ", java.lang.String t5" : ""); } query.setFilter(filter); query.declareParameters(parameters); if (colName != null) { query.setUnique(true); - mStatsObj = - (MPartitionColumnStatistics) query.executeWithArray(partName.trim(), - normalizeIdentifier(dbName), - normalizeIdentifier(tableName), - normalizeIdentifier(colName), - normalizeIdentifier(catName)); + if (engine != null) { + mStatsObj = + (MPartitionColumnStatistics) query.executeWithArray(partName.trim(), + normalizeIdentifier(dbName), + normalizeIdentifier(tableName), + normalizeIdentifier(colName), + normalizeIdentifier(catName), + engine); + } else { + mStatsObj = + (MPartitionColumnStatistics) query.executeWithArray(partName.trim(), + normalizeIdentifier(dbName), + normalizeIdentifier(tableName), + normalizeIdentifier(colName), + normalizeIdentifier(catName)); + } pm.retrieve(mStatsObj); if (mStatsObj != null) { pm.deletePersistent(mStatsObj); @@ -9152,11 +9234,20 @@ public boolean deletePartitionColumnStatistics(String catName, String dbName, St " partition=" + partName + " col=" + colName); } } else { - mStatsObjColl = - (List) query.executeWithArray(partName.trim(), - normalizeIdentifier(dbName), - normalizeIdentifier(tableName), - normalizeIdentifier(catName)); + if (engine != null) { + mStatsObjColl = + (List) query.executeWithArray(partName.trim(), + normalizeIdentifier(dbName), + normalizeIdentifier(tableName), + normalizeIdentifier(catName), + engine); + } else { + mStatsObjColl = + (List) query.executeWithArray(partName.trim(), + normalizeIdentifier(dbName), + normalizeIdentifier(tableName), + normalizeIdentifier(catName)); + } pm.retrieveAll(mStatsObjColl); if (mStatsObjColl != null) { pm.deletePersistentAll(mStatsObjColl); @@ -9177,7 +9268,7 @@ public boolean deletePartitionColumnStatistics(String catName, String dbName, St @Override public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, - String colName) + String colName, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { boolean ret = false; Query query = null; @@ -9202,22 +9293,31 @@ public boolean deleteTableColumnStatistics(String catName, String dbName, String String filter; String parameters; if (colName != null) { - filter = "table.tableName == t1 && dbName == t2 && catName == t3 && colName == t4"; - parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4"; + filter = "table.tableName == t1 && dbName == t2 && catName == t3 && colName == t4" + (engine != null ? " && engine == t5" : ""); + parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4" + (engine != null ? ", java.lang.String t5" : ""); } else { - filter = "table.tableName == t1 && dbName == t2 && catName == t3"; - parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3"; + filter = "table.tableName == t1 && dbName == t2 && catName == t3" + (engine != null ? 
" && engine == t4" : ""); + parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3" + (engine != null ? ", java.lang.String t4" : ""); } query.setFilter(filter); query.declareParameters(parameters); if (colName != null) { query.setUnique(true); - mStatsObj = - (MTableColumnStatistics) query.executeWithArray(normalizeIdentifier(tableName), - normalizeIdentifier(dbName), - normalizeIdentifier(catName), - normalizeIdentifier(colName)); + if (engine != null) { + mStatsObj = + (MTableColumnStatistics) query.executeWithArray(normalizeIdentifier(tableName), + normalizeIdentifier(dbName), + normalizeIdentifier(catName), + normalizeIdentifier(colName), + engine); + } else { + mStatsObj = + (MTableColumnStatistics) query.executeWithArray(normalizeIdentifier(tableName), + normalizeIdentifier(dbName), + normalizeIdentifier(catName), + normalizeIdentifier(colName)); + } pm.retrieve(mStatsObj); if (mStatsObj != null) { @@ -9227,11 +9327,20 @@ public boolean deleteTableColumnStatistics(String catName, String dbName, String + tableName + " col=" + colName); } } else { - mStatsObjColl = - (List) query.execute( - normalizeIdentifier(tableName), - normalizeIdentifier(dbName), - normalizeIdentifier(catName)); + if (engine != null) { + mStatsObjColl = + (List) query.executeWithArray( + normalizeIdentifier(tableName), + normalizeIdentifier(dbName), + normalizeIdentifier(catName), + engine); + } else { + mStatsObjColl = + (List) query.executeWithArray( + normalizeIdentifier(tableName), + normalizeIdentifier(dbName), + normalizeIdentifier(catName)); + } pm.retrieveAll(mStatsObjColl); if (mStatsObjColl != null) { pm.deletePersistentAll(mStatsObjColl); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index c5e1a10869..2a4c267dfa 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -933,7 +933,7 @@ Partition getPartitionWithAuth(String catName, String dbName, String tblName, * @throws MetaException error accessing the RDBMS * */ - ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, + List getTableColumnStatistics(String catName, String dbName, String tableName, List colName) throws MetaException, NoSuchObjectException; /** @@ -943,6 +943,23 @@ ColumnStatistics getTableColumnStatistics(String catName, String dbName, String * @param dbName name of the database, defaults to current database * @param tableName name of the table * @param colName names of the columns for which statistics is requested + * @param engine engine requesting the statistics + * @return Relevant column statistics for the column for the given table + * @throws NoSuchObjectException No such table + * @throws MetaException error accessing the RDBMS + * + */ + ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, + List colName, String engine) throws MetaException, NoSuchObjectException; + + /** + * Returns the relevant column statistics for a given column in a given table in a given database + * if such statistics exist. + * @param catName catalog name. 
+ * @param dbName name of the database, defaults to current database + * @param tableName name of the table + * @param colName names of the columns for which statistics is requested + * @param engine engine requesting the statistics * @param writeIdList string format of valid writeId transaction list * @return Relevant column statistics for the column for the given table * @throws NoSuchObjectException No such table @@ -951,7 +968,7 @@ ColumnStatistics getTableColumnStatistics(String catName, String dbName, String */ ColumnStatistics getTableColumnStatistics( String catName, String dbName, String tableName, - List colName, String writeIdList) + List colName, String engine, String writeIdList) throws MetaException, NoSuchObjectException; /** @@ -965,8 +982,8 @@ ColumnStatistics getTableColumnStatistics( * @throws MetaException error accessing the RDBMS * @throws NoSuchObjectException no such partition. */ - List getPartitionColumnStatistics( - String catName, String dbName, String tblName, List partNames, List colNames) + List> getPartitionColumnStatistics( + String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException; /** @@ -976,6 +993,23 @@ ColumnStatistics getTableColumnStatistics( * @param tblName table name. * @param partNames list of partition names. These are names so must be key1=val1[/key2=val2...] * @param colNames list of columns to get stats for + * @param engine engine requesting the statistics + * @return list of statistics objects + * @throws MetaException error accessing the RDBMS + * @throws NoSuchObjectException no such partition. + */ + List getPartitionColumnStatistics( + String catName, String dbName, String tblName, List partNames, List colNames, + String engine) throws MetaException, NoSuchObjectException; + + /** + * Get statistics for a partition for a set of columns. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param partNames list of partition names. These are names so must be key1=val1[/key2=val2...] + * @param colNames list of columns to get stats for + * @param engine engine requesting the statistics * @param writeIdList string format of valid writeId transaction list * @return list of statistics objects * @throws MetaException error accessing the RDBMS @@ -984,7 +1018,7 @@ ColumnStatistics getTableColumnStatistics( List getPartitionColumnStatistics( String catName, String dbName, String tblName, List partNames, List colNames, - String writeIdList) + String engine, String writeIdList) throws MetaException, NoSuchObjectException; /** @@ -997,6 +1031,7 @@ ColumnStatistics getTableColumnStatistics( * @param partName partition name. * @param partVals partition values. * @param colName column name. + * @param engine engine for which we want to delete statistics * @return Boolean indicating the outcome of the operation * @throws NoSuchObjectException no such partition * @throws MetaException error access the RDBMS @@ -1004,7 +1039,7 @@ ColumnStatistics getTableColumnStatistics( * @throws InvalidInputException bad input, such as null table or database name. 
*/ boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, - String partName, List partVals, String colName) + String partName, List partVals, String colName, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; /** @@ -1013,6 +1048,7 @@ boolean deletePartitionColumnStatistics(String catName, String dbName, String ta * @param dbName database name * @param tableName table name * @param colName column name. Null to delete stats for all columns in the table. + * @param engine engine for which we want to delete statistics * @return true if the statistics were deleted. * @throws NoSuchObjectException no such table or column. * @throws MetaException error access the RDBMS. @@ -1020,7 +1056,7 @@ boolean deletePartitionColumnStatistics(String catName, String dbName, String ta * @throws InvalidInputException bad inputs, such as null table name. */ boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, - String colName) + String colName, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; long cleanupEvents(); @@ -1223,12 +1259,13 @@ void dropFunction(String catName, String dbName, String funcName) * @param partNames list of partition names. These are the names of the partitions, not * values. * @param colNames list of column names + * @param engine engine requesting the statistics * @return aggregated stats * @throws MetaException error accessing RDBMS * @throws NoSuchObjectException no such table or partition */ AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, - List partNames, List colNames) throws MetaException, NoSuchObjectException; + List partNames, List colNames, String engine) throws MetaException, NoSuchObjectException; /** * Get aggregated stats for a table or partition(s). @@ -1238,6 +1275,7 @@ AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, * @param partNames list of partition names. These are the names of the partitions, not * values. 
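[Reviewer note] To make the widened RawStore surface concrete, a hedged usage sketch of the engine-scoped read, aggregate, and delete calls follows (catalog, table, and column names are invented; the literal "hive" matches the backfill default used by the upgrade scripts later in this patch):

import java.util.Arrays;
import java.util.List;

// Illustrative caller, not part of the patch; rawStore is any RawStore impl
// and partNames is assumed to hold names like "ds=2019-08-01".
static void engineScopedCalls(RawStore rawStore, List<String> partNames) throws Exception {
  ColumnStatistics tblStats = rawStore.getTableColumnStatistics(
      "hive", "default", "web_logs", Arrays.asList("ip", "referrer"), "hive");
  AggrStats aggr = rawStore.get_aggr_stats_for(
      "hive", "default", "web_logs", partNames, Arrays.asList("ip"), "hive");
  boolean dropped = rawStore.deleteTableColumnStatistics(
      "hive", "default", "web_logs", "ip", "hive");
}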
* @param colNames list of column names + * @param engine engine requesting the statistics * @param writeIdList string format of valid writeId transaction list * @return aggregated stats * @throws MetaException error accessing RDBMS @@ -1245,7 +1283,7 @@ AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, */ AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, List colNames, - String writeIdList) + String engine, String writeIdList) throws MetaException, NoSuchObjectException; /** diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java index 89f0db8495..e8d197abb4 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java @@ -58,7 +58,7 @@ public class StatObjectConverter { // JDO public static MTableColumnStatistics convertToMTableColumnStatistics(MTable table, - ColumnStatisticsDesc statsDesc, ColumnStatisticsObj statsObj) + ColumnStatisticsDesc statsDesc, ColumnStatisticsObj statsObj, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException { if (statsObj == null || statsDesc == null) { throw new InvalidObjectException("Invalid column stats object"); @@ -127,6 +127,7 @@ public static MTableColumnStatistics convertToMTableColumnStatistics(MTable tabl dateStats.isSetLowValue() ? dateStats.getLowValue().getDaysSinceEpoch() : null, dateStats.isSetHighValue() ? dateStats.getHighValue().getDaysSinceEpoch() : null); } + mColStats.setEngine(engine); return mColStats; } @@ -171,6 +172,7 @@ public static void setFieldsIntoOldStats( if (mStatsObj.getNumNulls() != null) { oldStatsObj.setNumNulls(mStatsObj.getNumNulls()); } + oldStatsObj.setEngine(mStatsObj.getEngine()); oldStatsObj.setLastAnalyzed(mStatsObj.getLastAnalyzed()); } @@ -216,6 +218,7 @@ public static void setFieldsIntoOldStats( if (mStatsObj.getNumNulls() != null) { oldStatsObj.setNumNulls(mStatsObj.getNumNulls()); } + oldStatsObj.setEngine(mStatsObj.getEngine()); } public static ColumnStatisticsObj getTableColumnStatisticsObj( @@ -322,7 +325,7 @@ public static ColumnStatisticsDesc getTableColumnStatisticsDesc( } public static MPartitionColumnStatistics convertToMPartitionColumnStatistics( - MPartition partition, ColumnStatisticsDesc statsDesc, ColumnStatisticsObj statsObj) + MPartition partition, ColumnStatisticsDesc statsDesc, ColumnStatisticsObj statsObj, String engine) throws MetaException, NoSuchObjectException { if (statsDesc == null || statsObj == null) { return null; @@ -392,6 +395,7 @@ public static MPartitionColumnStatistics convertToMPartitionColumnStatistics( dateStats.isSetLowValue() ? dateStats.getLowValue().getDaysSinceEpoch() : null, dateStats.isSetHighValue() ? 
dateStats.getHighValue().getDaysSinceEpoch() : null); } + mColStats.setEngine(engine); return mColStats; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java index bb673f428f..b54a11516f 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java @@ -32,6 +32,14 @@ public class CacheUtils { private static final String delimit = "\u0001"; + /** + * Constant variable that stores engine value needed to store / access + * Hive column statistics. + * TODO: Once CachedStore supports multiple engines, this constant variable + * can be removed. + */ + protected static final String HIVE_ENGINE = "hive"; + public static String buildCatalogKey(String catName) { return catName; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index 511e6c1f64..2c7fee6f84 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -98,8 +98,10 @@ // TODO monitor event queue // TODO initial load slow? // TODO size estimation +// TODO Only works with Hive engine for column statistics public class CachedStore implements RawStore, Configurable { + private static ScheduledExecutorService cacheUpdateMaster = null; private static List whitelistPatterns = null; private static List blacklistPatterns = null; @@ -214,11 +216,14 @@ private void initSharedCache(Configuration conf) { private static ColumnStatistics updateStatsForAlterPart(RawStore rawStore, Table before, String catalogName, String dbName, String tableName, Partition part) throws Exception { - ColumnStatistics colStats; List deletedCols = new ArrayList<>(); - colStats = HiveAlterHandler + List multiColumnStats = HiveAlterHandler .updateOrGetPartitionColumnStats(rawStore, catalogName, dbName, tableName, part.getValues(), part.getSd().getCols(), before, part, null, deletedCols); + if (multiColumnStats.size() > 1) { + throw new RuntimeException("CachedStore can only be enabled for Hive engine"); + } + ColumnStatistics colStats = multiColumnStats.isEmpty() ? null : multiColumnStats.get(0); for (String column : deletedCols) { sharedCache.removePartitionColStatsFromCache(catalogName, dbName, tableName, part.getValues(), column); } @@ -240,8 +245,12 @@ private static void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, } } - List statisticsObjs = HiveAlterHandler + List multiColumnStats = HiveAlterHandler .alterTableUpdateTableColumnStats(rawStore, tblBefore, tblAfter, null, null, rawStore.getConf(), deletedCols); + if (multiColumnStats.size() > 1) { + throw new RuntimeException("CachedStore can only be enabled for Hive engine"); + } + List statisticsObjs = multiColumnStats.isEmpty() ? 
null : multiColumnStats.get(0).getStatsObj(); if (colStats != null) { sharedCache.alterTableAndStatsInCache(catalogName, dbName, tableName, tblAfter.getWriteId(), statisticsObjs, tblAfter.getParameters()); } @@ -504,12 +513,12 @@ static void prewarm(RawStore rawStore) { // Get partition column stats for this table Deadline.startTimer("getPartitionColumnStatistics"); partitionColStats = - rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames); + rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, CacheUtils.HIVE_ENGINE); Deadline.stopTimer(); // Get aggregate stats for all partitions of a table and for all but default // partition Deadline.startTimer("getAggrPartitionColumnStatistics"); - aggrStatsAllPartitions = rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); + aggrStatsAllPartitions = rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, CacheUtils.HIVE_ENGINE); Deadline.stopTimer(); // Remove default partition from partition names and get aggregate // stats again @@ -526,12 +535,12 @@ static void prewarm(RawStore rawStore) { partNames.remove(defaultPartitionName); Deadline.startTimer("getAggrPartitionColumnStatistics"); aggrStatsAllButDefaultPartition = - rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); + rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, CacheUtils.HIVE_ENGINE); Deadline.stopTimer(); } } else { Deadline.startTimer("getTableColumnStatistics"); - tableColStats = rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); + tableColStats = rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, CacheUtils.HIVE_ENGINE); Deadline.stopTimer(); } // If the table could not be cached due to memory limit, stop prewarm @@ -836,7 +845,7 @@ private void updateTableColStats(RawStore rawStore, String catName, String dbNam if (table != null && !table.isSetPartitionKeys()) { List colNames = MetaStoreUtils.getColumnNamesForTable(table); Deadline.startTimer("getTableColumnStatistics"); - ColumnStatistics tableColStats = rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); + ColumnStatistics tableColStats = rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, CacheUtils.HIVE_ENGINE); Deadline.stopTimer(); if (tableColStats != null) { sharedCache.refreshTableColStatsInCache(StringUtils.normalizeIdentifier(catName), @@ -888,7 +897,7 @@ private void updateTablePartitionColStats(RawStore rawStore, String catName, Str // Get partition column stats for this table Deadline.startTimer("getPartitionColumnStatistics"); List partitionColStats = - rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames); + rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, CacheUtils.HIVE_ENGINE); Deadline.stopTimer(); sharedCache.refreshPartitionColStatsInCache(catName, dbName, tblName, partitionColStats); Deadline.startTimer("getPartitionsByNames"); @@ -928,7 +937,7 @@ private static void updateTableAggregatePartitionColStats(RawStore rawStore, Str List colNames = MetaStoreUtils.getColumnNamesForTable(table); if ((partNames != null) && (partNames.size() > 0)) { Deadline.startTimer("getAggregateStatsForAllPartitions"); - AggrStats aggrStatsAllPartitions = rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); + AggrStats aggrStatsAllPartitions = rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames,
CacheUtils.HIVE_ENGINE); Deadline.stopTimer(); // Remove default partition from partition names and get aggregate stats again List partKeys = table.getPartitionKeys(); @@ -943,7 +952,7 @@ private static void updateTableAggregatePartitionColStats(RawStore rawStore, Str partNames.remove(defaultPartitionName); Deadline.startTimer("getAggregateStatsForAllPartitionsExceptDefault"); AggrStats aggrStatsAllButDefaultPartition = - rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); + rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, CacheUtils.HIVE_ENGINE); Deadline.stopTimer(); sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName), StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), aggrStatsAllPartitions, @@ -2010,37 +2019,49 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt return newParams; } - @Override public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName, + @Override public List getTableColumnStatistics(String catName, String dbName, String tblName, List colNames) throws MetaException, NoSuchObjectException { - return getTableColumnStatistics(catName, dbName, tblName, colNames, null); + //TODO + return null; } @Override public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName, - List colNames, String validWriteIds) throws MetaException, NoSuchObjectException { + List colNames, String engine) throws MetaException, NoSuchObjectException { + return getTableColumnStatistics(catName, dbName, tblName, colNames, engine, null); + } + + @Override public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName, + List colNames, String engine, String validWriteIds) throws MetaException, NoSuchObjectException { + if (!CacheUtils.HIVE_ENGINE.equals(engine)) { + throw new RuntimeException("CachedStore can only be enabled for Hive engine"); + } catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName)) { - return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, validWriteIds); + return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, engine, validWriteIds); } Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, validWriteIds); + return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, engine, validWriteIds); } ColumnStatistics columnStatistics = sharedCache.getTableColStatsFromCache(catName, dbName, tblName, colNames, validWriteIds, areTxnStatsSupported); if (columnStatistics == null) { LOG.info("Stat of Table {}.{} for column {} is not present in cache."
+ " Getting from raw store", dbName, tblName, colNames); - return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, validWriteIds); + return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, engine, validWriteIds); } return columnStatistics; } - @Override public boolean deleteTableColumnStatistics(String catName, String dbName, String tblName, String colName) + @Override public boolean deleteTableColumnStatistics(String catName, String dbName, String tblName, String colName, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - boolean succ = rawStore.deleteTableColumnStatistics(catName, dbName, tblName, colName); + if (!CacheUtils.HIVE_ENGINE.equals(engine)) { + throw new RuntimeException("CachedStore can only be enabled for Hive engine"); + } + boolean succ = rawStore.deleteTableColumnStatistics(catName, dbName, tblName, colName, engine); // in case of event based cache update, cache is updated during commit txn if (succ && !canUseEvents) { catName = normalizeIdentifier(catName); @@ -2076,14 +2097,22 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt return newParams; } - @Override public List getPartitionColumnStatistics(String catName, String dbName, String tblName, + @Override public List> getPartitionColumnStatistics(String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException { - return getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, null); + // TODO + return null; } @Override public List getPartitionColumnStatistics(String catName, String dbName, String tblName, - List partNames, List colNames, String writeIdList) throws MetaException, NoSuchObjectException { + List partNames, List colNames, String engine) throws MetaException, NoSuchObjectException { + return getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, engine, null); + } + @Override public List getPartitionColumnStatistics(String catName, String dbName, String tblName, + List partNames, List colNames, String engine, String writeIdList) throws MetaException, NoSuchObjectException { + if (!CacheUtils.HIVE_ENGINE.equals(engine)) { + throw new RuntimeException("CachedStore can only be enabled for Hive engine"); + } // If writeIdList is not null, that means stats are requested within a txn context. So set stats compliant to false, // if areTxnStatsSupported is false or the write id which has updated the stats is not compatible with writeIdList.
// This is done within table lock as the number of partitions may be more than one and we need a consistent view @@ -2092,15 +2121,18 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt .getPartitionColStatsListFromCache(catName, dbName, tblName, partNames, colNames, writeIdList, areTxnStatsSupported); if (columnStatistics == null) { - return rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, writeIdList); + return rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, engine, writeIdList); } return columnStatistics; } @Override public boolean deletePartitionColumnStatistics(String catName, String dbName, String tblName, - String partName, List partVals, String colName) + String partName, List partVals, String colName, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - boolean succ = rawStore.deletePartitionColumnStatistics(catName, dbName, tblName, partName, partVals, colName); + if (!CacheUtils.HIVE_ENGINE.equals(engine)) { + throw new RuntimeException("CachedStore can only be enabled for Hive engine"); + } + boolean succ = rawStore.deletePartitionColumnStatistics(catName, dbName, tblName, partName, partVals, colName, engine); // in case of event based cache update, cache is updated during commit txn. if (succ && !canUseEvents) { catName = normalizeIdentifier(catName); @@ -2115,12 +2147,15 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt } @Override public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, - List colNames) throws MetaException, NoSuchObjectException { - return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, null); + List colNames, String engine) throws MetaException, NoSuchObjectException { + return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, engine, null); } @Override public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, - List colNames, String writeIdList) throws MetaException, NoSuchObjectException { + List colNames, String engine, String writeIdList) throws MetaException, NoSuchObjectException { + if (!CacheUtils.HIVE_ENGINE.equals(engine)) { + throw new RuntimeException("CachedStore can only be enabled for Hive engine"); + } List colStats; catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); @@ -2129,12 +2164,12 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt // (incl. due to lack of sync w.r.t. the below rawStore call). // In case the cache is updated using events, aggregate is calculated locally and thus can be read from cache. 
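[Reviewer note] The same Hive-only guard now opens every stats override in CachedStore; it could be folded into a single private helper. A sketch of that refactor (ensureHiveEngine is a hypothetical name, not in the patch):

// Hypothetical helper: centralizes the check that CachedStore currently
// repeats before serving or mutating any column statistics.
private static void ensureHiveEngine(String engine) {
  if (!CacheUtils.HIVE_ENGINE.equals(engine)) {
    throw new RuntimeException("CachedStore can only be enabled for Hive engine");
  }
}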
if (!shouldCacheTable(catName, dbName, tblName) || (writeIdList != null && !canUseEvents)) { - return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, writeIdList); + return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, engine, writeIdList); } Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, writeIdList); + return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, engine, writeIdList); } List allPartNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1); @@ -2163,7 +2198,7 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt if (mergedColStats == null) { LOG.info("Aggregate stats of partition " + TableName.getQualified(catName, dbName, tblName) + "." + partNames + " for columns " + colNames + " is not present in cache. Getting it from raw store"); - return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, writeIdList); + return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, engine, writeIdList); } return new AggrStats(mergedColStats.getColStats(), mergedColStats.getPartsFound()); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java index 45b1b0d0bf..500b5b5784 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java @@ -699,7 +699,7 @@ public ColumnStatistics getCachedTableColStats(ColumnStatisticsDesc csd, List partVal, String c return null; } } - ColumnStatistics columnStatistics = new ColumnStatistics(csd, statObject); + ColumnStatistics columnStatistics = new ColumnStatistics(csd, statObject, CacheUtils.HIVE_ENGINE); if (writeIdList != null && TxnUtils.isTransactionalTable(getParameters())) { columnStatistics.setIsStatsCompliant(true); if (!txnStatSupported) { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/DeletePartitionColumnStatEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/DeletePartitionColumnStatEvent.java index d64b57d493..5587752236 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/DeletePartitionColumnStatEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/DeletePartitionColumnStatEvent.java @@ -31,7 +31,7 @@ @InterfaceAudience.Public @InterfaceStability.Stable public class DeletePartitionColumnStatEvent extends ListenerEvent { - private String catName, dbName, tableName, colName, partName; + private String catName, dbName, tableName, colName, partName, engine; private List partVals; @@ -42,10 +42,11 @@ * @param partName partition column name * @param partVals partition value * @param colName column name + * @param engine engine * @param handler handler that is firing the event */ public DeletePartitionColumnStatEvent(String catName, String dbName, String tableName, String partName, - List partVals, String colName, IHMSHandler handler) { + List partVals, 
String colName, String engine, IHMSHandler handler) { super(true, handler); this.catName = catName; this.dbName = dbName; @@ -53,6 +54,7 @@ public DeletePartitionColumnStatEvent(String catName, String dbName, String tabl this.colName = colName; this.partName = partName; this.partVals = partVals; + this.engine = engine; } public String getCatName() { @@ -78,4 +80,8 @@ public String getPartName() { public List getPartVals() { return partVals; } + + public String getEngine() { + return engine; + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/DeleteTableColumnStatEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/DeleteTableColumnStatEvent.java index 7638744281..40f8832971 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/DeleteTableColumnStatEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/DeleteTableColumnStatEvent.java @@ -29,21 +29,23 @@ @InterfaceAudience.Public @InterfaceStability.Stable public class DeleteTableColumnStatEvent extends ListenerEvent { - private String catName, dbName, tableName, colName; + private String catName, dbName, tableName, colName, engine; /** * @param catName catalog name * @param dbName database name * @param tableName table name * @param colName column name + * @param engine engine * @param handler handler that is firing the event */ - public DeleteTableColumnStatEvent(String catName, String dbName, String tableName, String colName, IHMSHandler handler) { + public DeleteTableColumnStatEvent(String catName, String dbName, String tableName, String colName, String engine, IHMSHandler handler) { super(true, handler); this.catName = catName; this.dbName = dbName; this.tableName = tableName; this.colName = colName; + this.engine = engine; } public String getCatName() { @@ -61,4 +63,8 @@ public String getTableName() { public String getColName() { return colName; } + + public String getEngine() { + return engine; + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java index 50d9c5b0cf..27accc66da 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java @@ -40,6 +40,7 @@ private String partitionName; private String colName; private String colType; + private String engine; private Long longLowValue; private Long longHighValue; @@ -278,4 +279,12 @@ public void setDecimalHighValue(String decimalHighValue) { public void setBitVector(byte[] bitVector) { this.bitVector = bitVector; } + + public String getEngine() { + return engine; + } + + public void setEngine(String engine) { + this.engine = engine; + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java index 731cd6f7fa..81c3f8c1c3 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java +++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java @@ -38,6 +38,7 @@ private String tableName; private String colName; private String colType; + private String engine; private Long longLowValue; private Long longHighValue; @@ -269,4 +270,12 @@ public void setDecimalHighValue(String decimalHighValue) { public void setBitVector(byte[] bitVector) { this.bitVector = bitVector; } + + public String getEngine() { + return engine; + } + + public void setEngine(String engine) { + this.engine = engine; + } } diff --git a/standalone-metastore/metastore-server/src/main/resources/package.jdo b/standalone-metastore/metastore-server/src/main/resources/package.jdo index 4586f22429..00783b6c0c 100644 --- a/standalone-metastore/metastore-server/src/main/resources/package.jdo +++ b/standalone-metastore/metastore-server/src/main/resources/package.jdo @@ -1009,6 +1009,9 @@ + + + @@ -1079,7 +1082,11 @@ + + + + diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql index cac59bce97..f57827a880 100644 --- a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql +++ b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql @@ -107,7 +107,8 @@ CREATE TABLE "APP"."TAB_COL_STATS"( "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL, - "BIT_VECTOR" BLOB + "BIT_VECTOR" BLOB, + "ENGINE" VARCHAR(128) NOT NULL ); CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB); @@ -156,7 +157,8 @@ CREATE TABLE "APP"."PART_COL_STATS"( "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, - "PART_ID" BIGINT NOT NULL + "PART_ID" BIGINT NOT NULL, + "ENGINE" VARCHAR(128) NOT NULL ); CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255)); diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql index 1a1e34a5c8..438ecd818e 100644 --- a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql +++ b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql @@ -12,6 +12,12 @@ CREATE UNIQUE INDEX "APP"."UNIQUE_WM_RESOURCEPLAN" ON "APP"."WM_RESOURCEPLAN" (" -- HIVE-21063 CREATE UNIQUE INDEX "APP"."NOTIFICATION_LOG_EVENT_ID" ON "APP"."NOTIFICATION_LOG" ("EVENT_ID"); +-- HIVE-22046 (DEFAULT HIVE) +ALTER TABLE "APP"."TAB_COL_STATS" ADD ENGINE VARCHAR(128); +UPDATE "APP"."TAB_COL_STATS" SET ENGINE = 'hive' WHERE ENGINE IS NULL; +ALTER TABLE "APP"."PART_COL_STATS" ADD ENGINE VARCHAR(128); +UPDATE "APP"."PART_COL_STATS" SET ENGINE = 'hive' WHERE ENGINE IS NULL; + -- This needs to be the last thing done. Insert any changes above this line. 
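[Reviewer note] The Derby upgrade above and the mssql, mysql, oracle, and postgres scripts below all follow the same two-step shape: add ENGINE as a nullable column, then backfill 'hive', since every pre-existing row was written by Hive (the fresh-install schema files, by contrast, declare ENGINE NOT NULL). A hedged JDBC sketch of that shape, with the Connection and exact dialect syntax treated as assumptions:

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

// Illustrative only: condensed form of the HIVE-22046 upgrade statements.
static void addEngineColumn(Connection conn) throws SQLException {
  try (Statement st = conn.createStatement()) {
    // Step 1: add the column as nullable so existing rows stay valid.
    st.executeUpdate("ALTER TABLE TAB_COL_STATS ADD ENGINE VARCHAR(128)");
    // Step 2: every pre-upgrade statistic belongs to the Hive engine.
    st.executeUpdate("UPDATE TAB_COL_STATS SET ENGINE = 'hive' WHERE ENGINE IS NULL");
    st.executeUpdate("ALTER TABLE PART_COL_STATS ADD ENGINE VARCHAR(128)");
    st.executeUpdate("UPDATE PART_COL_STATS SET ENGINE = 'hive' WHERE ENGINE IS NULL");
  }
}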
UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql index c231368312..221d4f1fff 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql @@ -94,7 +94,8 @@ CREATE TABLE PART_COL_STATS PART_ID bigint NULL, PARTITION_NAME nvarchar(767) NOT NULL, "TABLE_NAME" nvarchar(256) NOT NULL, - "CAT_NAME" nvarchar(256) NOT NULL + "CAT_NAME" nvarchar(256) NOT NULL, + "ENGINE" nvarchar(128) NOT NULL ); ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID); @@ -242,7 +243,8 @@ CREATE TABLE TAB_COL_STATS NUM_TRUES bigint NULL, TBL_ID bigint NULL, "TABLE_NAME" nvarchar(256) NOT NULL, - "CAT_NAME" nvarchar(256) NOT NULL + "CAT_NAME" nvarchar(256) NOT NULL, + "ENGINE" nvarchar(128) NOT NULL ); ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID); diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql index b077b41b47..bc98d5fc4a 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql @@ -16,6 +16,12 @@ CREATE UNIQUE INDEX NOTIFICATION_LOG_EVENT_ID ON NOTIFICATION_LOG (EVENT_ID); -- HIVE-21337 ALTER TABLE "COLUMNS_V2" ALTER COLUMN "COMMENT" nvarchar(4000); +-- HIVE-22046 (DEFAULT HIVE) +ALTER TABLE TAB_COL_STATS ADD ENGINE nvarchar(128); +UPDATE TAB_COL_STATS SET ENGINE = 'hive' WHERE ENGINE IS NULL; +ALTER TABLE PART_COL_STATS ADD ENGINE nvarchar(128); +UPDATE PART_COL_STATS SET ENGINE = 'hive' WHERE ENGINE IS NULL; + -- These lines need to be last. Insert any changes above. 
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE; diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql index e8af9a1b11..aca186bae2 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql @@ -733,6 +733,7 @@ CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` ( `NUM_TRUES` bigint(20), `NUM_FALSES` bigint(20), `LAST_ANALYZED` bigint(20) NOT NULL, + `ENGINE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, PRIMARY KEY (`CS_ID`), CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1; @@ -764,6 +765,7 @@ CREATE TABLE IF NOT EXISTS `PART_COL_STATS` ( `NUM_TRUES` bigint(20), `NUM_FALSES` bigint(20), `LAST_ANALYZED` bigint(20) NOT NULL, + `ENGINE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, PRIMARY KEY (`CS_ID`), CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1; diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql index 296cb12ef0..fdb82eb03c 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql @@ -17,6 +17,12 @@ CREATE UNIQUE INDEX `NOTIFICATION_LOG_EVENT_ID` ON NOTIFICATION_LOG (`EVENT_ID`) -- HIVE-21337 ALTER TABLE COLUMNS_V2 MODIFY COMMENT varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL; +-- HIVE-22046 (DEFAULT HIVE) +ALTER TABLE TAB_COL_STATS ADD ENGINE varchar(128); +UPDATE `TAB_COL_STATS` SET `ENGINE` = 'hive' WHERE `ENGINE` IS NULL; +ALTER TABLE PART_COL_STATS ADD ENGINE varchar(128); +UPDATE `PART_COL_STATS` SET `ENGINE` = 'hive' WHERE `ENGINE` IS NULL; + -- These lines need to be last. Insert any changes above. 
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE; diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql index b3c2f792dc..9f4296bd07 100644 --- a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql +++ b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql @@ -543,7 +543,8 @@ CREATE TABLE TAB_COL_STATS ( MAX_COL_LEN NUMBER, NUM_TRUES NUMBER, NUM_FALSES NUMBER, - LAST_ANALYZED NUMBER NOT NULL + LAST_ANALYZED NUMBER NOT NULL, + ENGINE VARCHAR2(128) NOT NULL ); ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID); @@ -583,7 +584,8 @@ CREATE TABLE PART_COL_STATS ( MAX_COL_LEN NUMBER, NUM_TRUES NUMBER, NUM_FALSES NUMBER, - LAST_ANALYZED NUMBER NOT NULL + LAST_ANALYZED NUMBER NOT NULL, + ENGINE VARCHAR2(128) NOT NULL ); ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID); diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql index 27b2bd9165..2dcb6a1dd4 100644 --- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql +++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql @@ -18,6 +18,12 @@ CREATE UNIQUE INDEX NOTIFICATION_LOG_EVENT_ID ON NOTIFICATION_LOG(EVENT_ID); -- HIVE-21337 ALTER TABLE COLUMNS_V2 MODIFY ("COMMENT" VARCHAR2(4000)); +-- HIVE-22046 (DEFAULT HIVE) +ALTER TABLE TAB_COL_STATS ADD ENGINE VARCHAR2(128); +UPDATE TAB_COL_STATS SET ENGINE = 'hive' WHERE ENGINE IS NULL; +ALTER TABLE PART_COL_STATS ADD ENGINE VARCHAR2(128); +UPDATE PART_COL_STATS SET ENGINE = 'hive' WHERE ENGINE IS NULL; + -- These lines need to be last. Insert any changes above. 
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS Status from dual; diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql index db5259959b..c4264f6c2d 100644 --- a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql +++ b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql @@ -553,7 +553,8 @@ CREATE TABLE "TAB_COL_STATS" ( "MAX_COL_LEN" bigint, "NUM_TRUES" bigint, "NUM_FALSES" bigint, - "LAST_ANALYZED" bigint NOT NULL + "LAST_ANALYZED" bigint NOT NULL, + "ENGINE" character varying(128) NOT NULL ); -- @@ -591,7 +592,8 @@ CREATE TABLE "PART_COL_STATS" ( "MAX_COL_LEN" bigint, "NUM_TRUES" bigint, "NUM_FALSES" bigint, - "LAST_ANALYZED" bigint NOT NULL + "LAST_ANALYZED" bigint NOT NULL, + "ENGINE" character varying(128) NOT NULL ); -- diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql index 2d4363b3d6..560109fa36 100644 --- a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql +++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql @@ -14,6 +14,12 @@ ALTER TABLE ONLY "WM_RESOURCEPLAN" ADD CONSTRAINT "UNIQUE_WM_RESOURCEPLAN" UNIQU -- HIVE-21063 CREATE UNIQUE INDEX "NOTIFICATION_LOG_EVENT_ID" ON "NOTIFICATION_LOG" USING btree ("EVENT_ID"); +-- HIVE-22046 (DEFAULT HIVE) +ALTER TABLE "TAB_COL_STATS" ADD "ENGINE" character varying(128); +UPDATE "TAB_COL_STATS" SET "ENGINE" = 'hive' WHERE "ENGINE" IS NULL; +ALTER TABLE "PART_COL_STATS" ADD "ENGINE" character varying(128); +UPDATE "PART_COL_STATS" SET "ENGINE" = 'hive' WHERE "ENGINE" IS NULL; + -- These lines need to be last. Insert any changes above.
UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1; SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0'; diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index da3c42a1d5..fc261ec918 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -689,34 +689,39 @@ public long cleanupEvents() { } @Override - public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, + public List getTableColumnStatistics(String catName, String dbName, String tableName, List colNames) throws MetaException, NoSuchObjectException { return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames); } + @Override + public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, + List colNames, String engine) throws MetaException, NoSuchObjectException { + return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames, engine); + } + @Override public ColumnStatistics getTableColumnStatistics(String catName, String dbName, - String tableName, List colNames, - String writeIdList) + String tableName, List colNames, String engine, String writeIdList) throws MetaException, NoSuchObjectException { return objectStore.getTableColumnStatistics( - catName, dbName, tableName, colNames, writeIdList); + catName, dbName, tableName, colNames, engine, writeIdList); } @Override public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, - String colName) + String colName, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - return objectStore.deleteTableColumnStatistics(catName, dbName, tableName, colName); + return objectStore.deleteTableColumnStatistics(catName, dbName, tableName, colName, engine); } @Override public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, - String partName, List partVals, String colName) + String partName, List partVals, String colName, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { return objectStore.deletePartitionColumnStatistics(catName, dbName, tableName, partName, - partVals, colName); + partVals, colName, engine); } @Override @@ -789,19 +794,26 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro } @Override - public List getPartitionColumnStatistics(String catName, String dbName, + public List> getPartitionColumnStatistics(String catName, String dbName, String tblName, List colNames, List partNames) throws MetaException, NoSuchObjectException { return objectStore.getPartitionColumnStatistics(catName, dbName, tblName , colNames, partNames); } + @Override + public List getPartitionColumnStatistics(String catName, String dbName, + String tblName, List colNames, List partNames, String engine) + throws MetaException, NoSuchObjectException { + return objectStore.getPartitionColumnStatistics(catName, dbName, tblName, colNames, partNames, engine); + } + @Override public List getPartitionColumnStatistics( 
String catName, String dbName, String tblName, List partNames, - List colNames, String writeIdList) + List colNames, String engine, String writeIdList) throws MetaException, NoSuchObjectException { return objectStore.getPartitionColumnStatistics( - catName, dbName, tblName , colNames, partNames, writeIdList); + catName, dbName, tblName , colNames, partNames, engine, writeIdList); } @Override @@ -867,16 +879,15 @@ public Function getFunction(String catName, String dbName, String funcName) @Override public AggrStats get_aggr_stats_for(String catName, String dbName, - String tblName, List partNames, List colNames) + String tblName, List partNames, List colNames, String engine) throws MetaException { return null; } @Override public AggrStats get_aggr_stats_for(String catName, String dbName, - String tblName, List partNames, - List colNames, - String writeIdList) + String tblName, List partNames, List colNames, + String engine, String writeIdList) throws MetaException, NoSuchObjectException { return null; } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index a018c503d1..3af611ed19 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -729,22 +729,28 @@ public boolean removeMasterKey(Integer keySeq) { } @Override - public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, + public List getTableColumnStatistics(String catName, String dbName, String tableName, List colName) throws MetaException, NoSuchObjectException { return null; } + @Override + public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, + List colName, String engine) throws MetaException, NoSuchObjectException { + return null; + } + @Override public ColumnStatistics getTableColumnStatistics( String catName, String dbName, String tableName, List colName, - String writeIdList) + String engine, String writeIdList) throws MetaException, NoSuchObjectException { return null; } @Override public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, - String colName) + String colName, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException { return false; } @@ -752,7 +758,7 @@ public boolean deleteTableColumnStatistics(String catName, String dbName, String @Override public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, - String partName, List partVals, String colName) + String partName, List partVals, String colName, String engine) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { return false; @@ -787,16 +793,23 @@ public void setMetaStoreSchemaVersion(String version, String comment) throws Met } @Override - public List getPartitionColumnStatistics(String catName, String dbName, + public List> getPartitionColumnStatistics(String catName, String dbName, String tblName, List colNames, List partNames) throws MetaException, NoSuchObjectException { return Collections.emptyList(); } + @Override + public List getPartitionColumnStatistics(String catName, String dbName, + String tblName, List colNames, List partNames, 
String engine) + throws MetaException, NoSuchObjectException { + return Collections.emptyList(); + } + @Override public List getPartitionColumnStatistics( String catName, String dbName, String tblName, List partNames, - List colNames, String writeIdList) + List colNames, String engine, String writeIdList) throws MetaException, NoSuchObjectException { return Collections.emptyList(); } @@ -859,7 +872,8 @@ public Function getFunction(String catName, String dbName, String funcName) @Override public AggrStats get_aggr_stats_for(String catName, String dbName, - String tblName, List partNames, List colNames) + String tblName, List partNames, List colNames, + String engine) throws MetaException { return null; } @@ -867,7 +881,7 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, @Override public AggrStats get_aggr_stats_for( String catName, String dbName, String tblName, List partNames, - List colNames, String writeIdList) + List colNames, String engine, String writeIdList) throws MetaException, NoSuchObjectException { return null; } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java index 4878a47b2d..9f3cad8ed3 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java @@ -1390,16 +1390,19 @@ public Partition getPartition(String db_name, String tbl_name, @Override public List getPartitionsByNames(String db_name, String tbl_name, List part_names) throws NoSuchObjectException, MetaException, TException { - return getPartitionsByNames(db_name, tbl_name, part_names, false); + return getPartitionsByNames(db_name, tbl_name, part_names, false, null); } @Override public List getPartitionsByNames(String db_name, String tbl_name, - List part_names, boolean get_col_stats) + List part_names, boolean get_col_stats, String engine) throws NoSuchObjectException, MetaException, TException { GetPartitionsByNamesRequest gpbnr = new GetPartitionsByNamesRequest(db_name, tbl_name); gpbnr.setNames(part_names); gpbnr.setGet_col_stats(get_col_stats); + if (get_col_stats) { + gpbnr.setEngine(engine); + } List parts = client.get_partitions_by_names_req(gpbnr).getPartitions(); return fastpath ? 
parts : deepCopyPartitions(filterHook.filterPartitions(parts)); } @@ -1770,17 +1773,17 @@ public void flushCache() { /** {@inheritDoc} */ @Override public List getTableColumnStatistics(String dbName, String tableName, - List colNames) throws NoSuchObjectException, MetaException, TException, + List colNames, String engine) throws NoSuchObjectException, MetaException, TException, InvalidInputException, InvalidObjectException { return client.get_table_statistics_req( - new TableStatsRequest(dbName, tableName, colNames)).getTableStats(); + new TableStatsRequest(dbName, tableName, colNames, engine)).getTableStats(); } @Override public List getTableColumnStatistics( - String dbName, String tableName, List colNames, String validWriteIdList) + String dbName, String tableName, List colNames, String engine, String validWriteIdList) throws NoSuchObjectException, MetaException, TException { - TableStatsRequest tsr = new TableStatsRequest(dbName, tableName, colNames); + TableStatsRequest tsr = new TableStatsRequest(dbName, tableName, colNames, engine); tsr.setValidWriteIdList(validWriteIdList); return client.get_table_statistics_req(tsr).getTableStats(); @@ -1789,18 +1792,18 @@ public void flushCache() { /** {@inheritDoc} */ @Override public Map> getPartitionColumnStatistics( - String dbName, String tableName, List partNames, List colNames) + String dbName, String tableName, List partNames, List colNames, String engine) throws NoSuchObjectException, MetaException, TException { return client.get_partitions_statistics_req( - new PartitionsStatsRequest(dbName, tableName, colNames, partNames)).getPartStats(); + new PartitionsStatsRequest(dbName, tableName, colNames, partNames, engine)).getPartStats(); } @Override public Map> getPartitionColumnStatistics( String dbName, String tableName, List partNames, - List colNames, String validWriteIdList) + List colNames, String engine, String validWriteIdList) throws NoSuchObjectException, MetaException, TException { - PartitionsStatsRequest psr = new PartitionsStatsRequest(dbName, tableName, colNames, partNames); + PartitionsStatsRequest psr = new PartitionsStatsRequest(dbName, tableName, colNames, partNames, engine); psr.setValidWriteIdList(validWriteIdList); return client.get_partitions_statistics_req( psr).getPartStats(); @@ -1809,19 +1812,19 @@ public void flushCache() { /** {@inheritDoc} */ @Override public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, - String colName) throws NoSuchObjectException, InvalidObjectException, MetaException, + String colName, String engine) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException { - return client.delete_partition_column_statistics(dbName, tableName, partName, colName); + return client.delete_partition_column_statistics(dbName, tableName, partName, colName, engine); } /** {@inheritDoc} */ @Override - public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) + public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName, String engine) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException { - return client.delete_table_column_statistics(dbName, tableName, colName); + return client.delete_table_column_statistics(dbName, tableName, colName, engine); } /** @@ -2688,25 +2691,25 @@ protected void drop_table_with_environment_context(String dbname, String name, @Override public AggrStats getAggrColStatsFor(String dbName, 
String tblName, - List colNames, List partNames) throws NoSuchObjectException, MetaException, TException { + List colNames, List partNames, String engine) throws NoSuchObjectException, MetaException, TException { if (colNames.isEmpty() || partNames.isEmpty()) { LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side."); return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate } - PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames); + PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames, engine); return client.get_aggr_stats_for(req); } @Override public AggrStats getAggrColStatsFor( String dbName, String tblName, List colNames, - List partName, String writeIdList) + List partName, String engine, String writeIdList) throws NoSuchObjectException, MetaException, TException { if (colNames.isEmpty() || partName.isEmpty()) { LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side."); return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate } - PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partName); + PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partName, engine); req.setValidWriteIdList(writeIdList); return client.get_aggr_stats_for(req); } @@ -3132,7 +3135,7 @@ public Table getTable(String catName, String dbName, String tableName) throws Me } @Override - public Table getTable(String catName, String dbName, boolean getColumnStats) throws MetaException, + public Table getTable(String catName, String dbName, boolean getColumnStats, String engine) throws MetaException, TException { throw new UnsupportedOperationException(); } @@ -3145,7 +3148,7 @@ public Table getTable(String catName, String dbName, String tableName, @Override public Table getTable(String catName, String dbName, String tableName, - String validWriteIdList, boolean getColumnStats) throws TException { + String validWriteIdList, boolean getColumnStats, String engine) throws TException { throw new UnsupportedOperationException(); } @@ -3293,7 +3296,7 @@ public boolean listPartitionsByExpr(String catName, String db_name, String tbl_n @Override public List getPartitionsByNames(String catName, String db_name, String tbl_name, - List part_names, boolean getColStats) + List part_names, boolean getColStats, String engine) throws NoSuchObjectException, MetaException, TException { throw new UnsupportedOperationException(); } @@ -3411,7 +3414,8 @@ public void renamePartition(String catName, String dbname, String tableName, @Override public List getTableColumnStatistics(String catName, String dbName, String tableName, - List colNames) throws + List colNames, + String engine) throws NoSuchObjectException, MetaException, TException { throw new UnsupportedOperationException(); } @@ -3419,7 +3423,7 @@ public void renamePartition(String catName, String dbname, String tableName, @Override public List getTableColumnStatistics( String catName, String dbName, String tableName, List colNames, - String validWriteIdList) + String engine, String validWriteIdList) throws NoSuchObjectException, MetaException, TException { throw new UnsupportedOperationException(); } @@ -3429,7 +3433,8 @@ public void renamePartition(String catName, String dbname, String tableName, String dbName, String tableName, List partNames, - List colNames) throws + List colNames, + String engine) throws NoSuchObjectException, 
       MetaException, TException {
     throw new UnsupportedOperationException();
   }
@@ -3437,14 +3442,14 @@ public void renamePartition(String catName, String dbname, String tableName,

   @Override
   public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
       String catName, String dbName, String tableName, List<String> partNames,
-      List<String> colNames, String validWriteIdList)
+      List<String> colNames, String engine, String validWriteIdList)
       throws NoSuchObjectException, MetaException, TException {
     throw new UnsupportedOperationException();
   }

   @Override
   public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName,
-                                                 String partName, String colName) throws
+                                                 String partName, String colName, String engine) throws
       NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException {
     throw new UnsupportedOperationException();
@@ -3452,7 +3457,7 @@ public boolean deletePartitionColumnStatistics(String catName, String dbName, St

   @Override
   public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
-                                             String colName) throws NoSuchObjectException,
+                                             String colName, String engine) throws NoSuchObjectException,
       MetaException, InvalidObjectException, TException, InvalidInputException {
     throw new UnsupportedOperationException();
   }
@@ -3484,15 +3489,14 @@ public Function getFunction(String catName, String dbName, String funcName) thro

   @Override
   public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName,
-                                      List<String> colNames, List<String> partNames) throws
+                                      List<String> colNames, List<String> partNames, String engine) throws
       NoSuchObjectException, MetaException, TException {
     throw new UnsupportedOperationException();
   }

   @Override
   public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName,
-                                      List<String> colNames, List<String> partNames,
-                                      String writeIdList)
+                                      List<String> colNames, List<String> partNames, String engine, String writeIdList)
       throws NoSuchObjectException, MetaException, TException {
     throw new UnsupportedOperationException();
   }
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index 0c4c84ce0b..c134929d93 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@ -119,6 +119,7 @@ protected static Warehouse warehouse;
   protected static boolean isThriftClient = false;

+  private static final String ENGINE = "hive";
   private static final String TEST_DB1_NAME = "testdb1";
   private static final String TEST_DB2_NAME = "testdb2";
@@ -1586,28 +1587,28 @@ public void testStatsFastTrivial() throws Throwable {
     assertEquals(4,partNames.size());

     // Test for both colNames and partNames being empty:
-    AggrStats aggrStatsEmpty = client.getAggrColStatsFor(dbName,tblName,emptyColNames,emptyPartNames);
+    AggrStats aggrStatsEmpty = client.getAggrColStatsFor(dbName,tblName,emptyColNames,emptyPartNames, ENGINE);
     assertNotNull(aggrStatsEmpty); // short-circuited on client-side, verifying that it's an empty object, not null
     assertEquals(0,aggrStatsEmpty.getPartsFound());
     assertNotNull(aggrStatsEmpty.getColStats());
     assert(aggrStatsEmpty.getColStats().isEmpty());

     // Test for only colNames being empty
-    AggrStats aggrStatsOnlyParts = client.getAggrColStatsFor(dbName,tblName,emptyColNames,partNames);
+    AggrStats aggrStatsOnlyParts = client.getAggrColStatsFor(dbName,tblName,emptyColNames,partNames, ENGINE);
     assertNotNull(aggrStatsOnlyParts); // short-circuited on client-side, verifying that it's an empty object, not null
     assertEquals(0,aggrStatsOnlyParts.getPartsFound());
     assertNotNull(aggrStatsOnlyParts.getColStats());
     assert(aggrStatsOnlyParts.getColStats().isEmpty());

     // Test for only partNames being empty
-    AggrStats aggrStatsOnlyCols = client.getAggrColStatsFor(dbName,tblName,colNames,emptyPartNames);
+    AggrStats aggrStatsOnlyCols = client.getAggrColStatsFor(dbName,tblName,colNames,emptyPartNames, ENGINE);
     assertNotNull(aggrStatsOnlyCols); // short-circuited on client-side, verifying that it's an empty object, not null
     assertEquals(0,aggrStatsOnlyCols.getPartsFound());
     assertNotNull(aggrStatsOnlyCols.getColStats());
     assert(aggrStatsOnlyCols.getColStats().isEmpty());

     // Test for valid values for both.
-    AggrStats aggrStatsFull = client.getAggrColStatsFor(dbName,tblName,colNames,partNames);
+    AggrStats aggrStatsFull = client.getAggrColStatsFor(dbName,tblName,colNames,partNames, ENGINE);
     assertNotNull(aggrStatsFull);
     assertEquals(0,aggrStatsFull.getPartsFound()); // would still be empty, because no stats are actually populated.
     assertNotNull(aggrStatsFull.getColStats());
@@ -1684,13 +1685,14 @@ public void testColumnStatistics() throws Throwable {
       ColumnStatistics colStats = new ColumnStatistics();
       colStats.setStatsDesc(statsDesc);
       colStats.setStatsObj(statsObjs);
+      colStats.setEngine(ENGINE);

       // write stats objs persistently
       client.updateTableColumnStatistics(colStats);

       // retrieve the stats obj that was just written
       ColumnStatisticsObj colStats2 = client.getTableColumnStatistics(
-          dbName, tblName, Lists.newArrayList(colName[0])).get(0);
+          dbName, tblName, Lists.newArrayList(colName[0]), ENGINE).get(0);

       // compare stats obj to ensure what we get is what we wrote
       assertNotNull(colStats2);
@@ -1702,11 +1704,11 @@ public void testColumnStatistics() throws Throwable {
       // test delete column stats; if no col name is passed all column stats associated with the
       // table is deleted
-      boolean status = client.deleteTableColumnStatistics(dbName, tblName, null);
+      boolean status = client.deleteTableColumnStatistics(dbName, tblName, null, ENGINE);
       assertTrue(status);
       // try to query stats for a column for which stats doesn't exist
       assertTrue(client.getTableColumnStatistics(
-          dbName, tblName, Lists.newArrayList(colName[1])).isEmpty());
+          dbName, tblName, Lists.newArrayList(colName[1]), ENGINE).isEmpty());

       colStats.setStatsDesc(statsDesc);
       colStats.setStatsObj(statsObjs);
@@ -1716,7 +1718,7 @@ public void testColumnStatistics() throws Throwable {
       // query column stats for column whose stats were updated in the previous call
       colStats2 = client.getTableColumnStatistics(
-          dbName, tblName, Lists.newArrayList(colName[0])).get(0);
+          dbName, tblName, Lists.newArrayList(colName[0]), ENGINE).get(0);

       // partition level column statistics test
       // create a table with multiple partitions
@@ -1745,11 +1747,12 @@
       colStats = new ColumnStatistics();
       colStats.setStatsDesc(statsDesc);
       colStats.setStatsObj(statsObjs);
+      colStats.setEngine(ENGINE);

       client.updatePartitionColumnStatistics(colStats);

       colStats2 = client.getPartitionColumnStatistics(dbName, tblName,
-          Lists.newArrayList(partName), Lists.newArrayList(colName[1])).get(partName).get(0);
+          Lists.newArrayList(partName), Lists.newArrayList(colName[1]), ENGINE).get(partName).get(0);

       // compare stats obj to ensure what we get is what we wrote
       assertNotNull(colStats2);
@@ -1761,14 +1764,14 @@
       assertEquals(colStats2.getStatsData().getStringStats().getNumDVs(), numDVs);

       // test stats deletion at partition level
-      client.deletePartitionColumnStatistics(dbName, tblName, partName, colName[1]);
+      client.deletePartitionColumnStatistics(dbName, tblName, partName, colName[1], ENGINE);

       colStats2 = client.getPartitionColumnStatistics(dbName, tblName,
-          Lists.newArrayList(partName), Lists.newArrayList(colName[0])).get(partName).get(0);
+          Lists.newArrayList(partName), Lists.newArrayList(colName[0]), ENGINE).get(partName).get(0);

       // test get stats on a column for which stats doesn't exist
       assertTrue(client.getPartitionColumnStatistics(dbName, tblName,
-          Lists.newArrayList(partName), Lists.newArrayList(colName[1])).isEmpty());
+          Lists.newArrayList(partName), Lists.newArrayList(colName[1]), ENGINE).isEmpty());
     } catch (Exception e) {
       System.err.println(StringUtils.stringifyException(e));
       System.err.println("testColumnStatistics() failed.");
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
index 1f7f69a86a..f62db7d63c 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@ -113,6 +113,7 @@
   private ObjectStore objectStore = null;
   private Configuration conf;

+  private static final String ENGINE = "hive";
   private static final String DB1 = "testobjectstoredb1";
   private static final String DB2 = "testobjectstoredb2";
   private static final String TABLE1 = "testobjectstoretable1";
@@ -645,6 +646,7 @@ private void createPartitionedTable(boolean withPrivileges, boolean withStatisti
     List<ColumnStatisticsObj> statsObjList = new ArrayList<>(1);
     stats.setStatsObj(statsObjList);
+    stats.setEngine(ENGINE);

     ColumnStatisticsData data = new ColumnStatisticsData();
     BooleanColumnStatsData boolStats = new BooleanColumnStatsData();
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
index 27c5bba5f7..8e35235925 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
@@ -64,6 +64,8 @@
   private static final Logger LOG = LoggerFactory.getLogger(TestOldSchema.class.getName());

+  private static final String ENGINE = "hive";
+
   public static class MockPartitionExpressionProxy implements PartitionExpressionProxy {
     @Override
     public String convertExprToFilter(byte[] expr, String defaultPartitionName) throws MetaException {
@@ -175,6 +177,7 @@ public void testPartitionOps() throws Exception {
       data.setLongStats(dcsd);
       obj.setStatsData(data);
       cs.addToStatsObj(obj);
+      cs.setEngine(ENGINE);
       store.updatePartitionColumnStatistics(cs, partVal, null, -1);
     }
@@ -199,7 +202,7 @@ public void checkStats(AggrStats aggrStats) throws Exception {
       partNames.add("ds=" + i);
     }
     AggrStats aggrStats = store.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tableName, partNames,
-        Arrays.asList("col1"));
+        Arrays.asList("col1"), ENGINE);
     statChecker.checkStats(aggrStats);
   }
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java
index 03378ba83f..3fdb4776ee 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java
@@ -69,6 +69,7 @@ public class TestStats {
   private static final Logger LOG = LoggerFactory.getLogger(TestStats.class);

+  private static final String ENGINE = "hive";
   private static final String NO_CAT = "DO_NOT_USE_A_CATALOG!";

   private IMetaStoreClient client;
@@ -178,6 +179,7 @@ public void tearDown() throws TException {
         partNames.add(partName);
       }
     }
+    rqst.setEngine(ENGINE);
     client.setPartitionColumnStatistics(rqst);
     return partNames;
   }
@@ -193,7 +195,7 @@ private ColumnStatistics buildStatsForOneTableOrPartition(String catName, String
     for (Column col : cols) objs.add(col.generate());

-    return new ColumnStatistics(desc, objs);
+    return new ColumnStatistics(desc, objs, ENGINE);
   }

   private void dropStats(String catName, String dbName, String tableName, String partName,
@@ -201,11 +203,11 @@ private void dropStats(String catName, String dbName, String tableName, String p
       throws TException {
     for (String colName : colNames) {
       if (partName == null) {
-        if (NO_CAT.equals(catName)) client.deleteTableColumnStatistics(dbName, tableName, colName);
-        else client.deleteTableColumnStatistics(catName, dbName, tableName, colName);
+        if (NO_CAT.equals(catName)) client.deleteTableColumnStatistics(dbName, tableName, colName, ENGINE);
+        else client.deleteTableColumnStatistics(catName, dbName, tableName, colName, ENGINE);
       } else {
-        if (NO_CAT.equals(catName)) client.deletePartitionColumnStatistics(dbName, tableName, partName, colName);
-        else client.deletePartitionColumnStatistics(catName, dbName, tableName, partName, colName);
+        if (NO_CAT.equals(catName)) client.deletePartitionColumnStatistics(dbName, tableName, partName, colName, ENGINE);
+        else client.deletePartitionColumnStatistics(catName, dbName, tableName, partName, colName, ENGINE);
       }
     }
   }
@@ -213,21 +215,21 @@ private void dropStats(String catName, String dbName, String tableName, String p
   private void compareStatsForTable(String catName, String dbName, String tableName,
                                     Map<String, Column> colMap) throws TException {
     List<ColumnStatisticsObj> objs = catName.equals(NO_CAT) ?
-        client.getTableColumnStatistics(dbName, tableName, new ArrayList<>(colMap.keySet())) :
-        client.getTableColumnStatistics(catName, dbName, tableName, new ArrayList<>(colMap.keySet()));
+        client.getTableColumnStatistics(dbName, tableName, new ArrayList<>(colMap.keySet()), ENGINE) :
+        client.getTableColumnStatistics(catName, dbName, tableName, new ArrayList<>(colMap.keySet()), ENGINE);
     compareStatsForOneTableOrPartition(objs, 0, colMap);

     // Test the statistics obtained through getTable call.
     Table table = catName.equals(NO_CAT) ?
-        client.getTable(dbName, tableName, true) :
-        client.getTable(catName, dbName, tableName, null, true);
+        client.getTable(dbName, tableName, true, ENGINE) :
+        client.getTable(catName, dbName, tableName, null, true, ENGINE);
     Assert.assertTrue(table.isSetColStats());
     compareStatsForOneTableOrPartition(table.getColStats().getStatsObj(), 0, colMap);

     // Test that getTable call doesn't get the statistics when not explicitly requested.
     table = catName.equals(NO_CAT) ?
-        client.getTable(dbName, tableName, false) :
-        client.getTable(catName, dbName, tableName, null, false);
+        client.getTable(dbName, tableName, false, null) :
+        client.getTable(catName, dbName, tableName, null, false, null);
     Assert.assertFalse(table.isSetColStats());
   }
@@ -235,14 +237,14 @@ private void compareStatsForPartitions(String catName, String dbName, String tab
                                          List<String> partNames, final Map<String, Column> colMap)
       throws TException {
     Map<String, List<ColumnStatisticsObj>> partObjs = catName.equals(NO_CAT) ?
-        client.getPartitionColumnStatistics(dbName, tableName, partNames, new ArrayList<>(colMap.keySet())) :
-        client.getPartitionColumnStatistics(catName, dbName, tableName, partNames, new ArrayList<>(colMap.keySet()));
+        client.getPartitionColumnStatistics(dbName, tableName, partNames, new ArrayList<>(colMap.keySet()), ENGINE) :
+        client.getPartitionColumnStatistics(catName, dbName, tableName, partNames, new ArrayList<>(colMap.keySet()), ENGINE);
     for (int i = 0; i < partNames.size(); i++) {
       compareStatsForOneTableOrPartition(partObjs.get(partNames.get(i)), i, colMap);
     }

     AggrStats aggr = catName.equals(NO_CAT) ?
-        client.getAggrColStatsFor(dbName, tableName, new ArrayList<>(colMap.keySet()), partNames) :
-        client.getAggrColStatsFor(catName, dbName, tableName, new ArrayList<>(colMap.keySet()), partNames);
+        client.getAggrColStatsFor(dbName, tableName, new ArrayList<>(colMap.keySet()), partNames, ENGINE) :
+        client.getAggrColStatsFor(catName, dbName, tableName, new ArrayList<>(colMap.keySet()), partNames, ENGINE);
     Assert.assertEquals(partNames.size(), aggr.getPartsFound());
     Assert.assertEquals(colMap.size(), aggr.getColStatsSize());
     aggr.getColStats().forEach(cso -> colMap.get(cso.getColName()).compareAggr(cso));
@@ -252,18 +254,18 @@ private void compareStatsForPartitions(String catName, String dbName, String tab
       String partName = partNames.get(i);
       List<Partition> partitions = catName.equals(NO_CAT) ?
           client.getPartitionsByNames(dbName, tableName, Collections.singletonList(partName),
-              true) :
+              true, ENGINE) :
           client.getPartitionsByNames(catName, dbName, tableName,
-              Collections.singletonList(partName), true);
+              Collections.singletonList(partName), true, ENGINE);
       Partition partition = partitions.get(0);
       compareStatsForOneTableOrPartition(partition.getColStats().getStatsObj(), i, colMap);

       // Also test that we do not get statistics when not requested
       partitions = catName.equals(NO_CAT) ?
           client.getPartitionsByNames(dbName, tableName, Collections.singletonList(partName),
-              true) :
+              true, ENGINE) :
           client.getPartitionsByNames(catName, dbName, tableName,
-              Collections.singletonList(partName), true);
+              Collections.singletonList(partName), true, ENGINE);
       partition = partitions.get(0);
       Assert.assertFalse(partition.isSetColStats());
     }
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java
index c9a6a471cb..c5cbeb34f0 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java
@@ -101,23 +101,23 @@ public boolean getPartitionsByExpr(String catName, String dbName, String tblName
   @Override
   public ColumnStatistics getTableColumnStatistics(String catName, String dbName,
-      String tableName, List<String> colNames) throws MetaException, NoSuchObjectException {
+      String tableName, List<String> colNames, String engine) throws MetaException, NoSuchObjectException {
     ColumnStatistics sqlResult = getTableColumnStatisticsInternal(
-        catName, dbName, tableName, colNames, true, false);
+        catName, dbName, tableName, colNames, engine, true, false);
     ColumnStatistics jdoResult = getTableColumnStatisticsInternal(
-        catName, dbName, tableName, colNames, false, true);
+        catName, dbName, tableName, colNames, engine, false, true);
     verifyObjects(sqlResult, jdoResult, ColumnStatistics.class);
     return sqlResult;
   }

   @Override
   public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName,
-      String tableName, List<String> partNames, List<String> colNames)
+      String tableName, List<String> partNames, List<String> colNames, String engine)
       throws MetaException, NoSuchObjectException {
     List<ColumnStatistics> sqlResult = getPartitionColumnStatisticsInternal(
-        catName, dbName, tableName, partNames, colNames, true, false);
+        catName, dbName, tableName, partNames, colNames, engine, true, false);
     List<ColumnStatistics> jdoResult = getPartitionColumnStatisticsInternal(
-        catName, dbName, tableName, partNames, colNames, false, true);
+        catName, dbName, tableName, partNames, colNames, engine, false, true);
     verifyLists(sqlResult, jdoResult, ColumnStatistics.class);
     return sqlResult;
   }
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
index 420369d792..3ac57505b4 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
@@ -970,6 +970,7 @@ public void testAggrStatsRepeatedRead() throws Exception {
     stats.setStatsDesc(statsDesc);
     stats.setStatsObj(colStatObjs);
+    stats.setEngine(CacheUtils.HIVE_ENGINE);

     cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1, null, -1);
     cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2, null, -1);
@@ -979,15 +980,15 @@
     List<String> aggrPartVals = new ArrayList<>();
     aggrPartVals.add("1");
     aggrPartVals.add("2");
-    AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+    AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames, CacheUtils.HIVE_ENGINE);
     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
-    aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+    aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames, CacheUtils.HIVE_ENGINE);
     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
     objectStore.deletePartitionColumnStatistics(DEFAULT_CATALOG_NAME, db.getName(), tbl.getTableName(),
-        Warehouse.makePartName(tbl.getPartitionKeys(), partVals1), partVals1, colName);
+        Warehouse.makePartName(tbl.getPartitionKeys(), partVals1), partVals1, colName, CacheUtils.HIVE_ENGINE);
     objectStore.deletePartitionColumnStatistics(DEFAULT_CATALOG_NAME, db.getName(), tbl.getTableName(),
-        Warehouse.makePartName(tbl.getPartitionKeys(), partVals2), partVals2, colName);
+        Warehouse.makePartName(tbl.getPartitionKeys(), partVals2), partVals2, colName, CacheUtils.HIVE_ENGINE);
     objectStore.dropPartition(DEFAULT_CATALOG_NAME, db.getName(), tbl.getTableName(), partVals1);
     objectStore.dropPartition(DEFAULT_CATALOG_NAME, db.getName(), tbl.getTableName(), partVals2);
     objectStore.dropTable(DEFAULT_CATALOG_NAME, db.getName(), tbl.getTableName());
@@ -1053,6 +1054,7 @@ public void testPartitionAggrStats() throws Exception {
     stats.setStatsDesc(statsDesc);
     stats.setStatsObj(colStatObjs);
+    stats.setEngine(CacheUtils.HIVE_ENGINE);

     cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1, null, -1);
@@ -1064,10 +1066,10 @@
     List<String> aggrPartVals = new ArrayList<>();
     aggrPartVals.add("1");
     aggrPartVals.add("2");
-    AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+    AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames, CacheUtils.HIVE_ENGINE);
     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40);
-    aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+    aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames, CacheUtils.HIVE_ENGINE);
     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40);
     cachedStore.shutdown();
@@ -1139,6 +1141,7 @@ public void testPartitionAggrStatsBitVector() throws Exception {
     stats.setStatsDesc(statsDesc);
     stats.setStatsObj(colStatObjs);
+    stats.setEngine(CacheUtils.HIVE_ENGINE);

     cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1, null, -1);
@@ -1157,10 +1160,10 @@
     List<String> aggrPartVals = new ArrayList<>();
     aggrPartVals.add("1");
     aggrPartVals.add("2");
-    AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+    AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames, CacheUtils.HIVE_ENGINE);
     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5);
-    aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+    aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames, CacheUtils.HIVE_ENGINE);
     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5);
     cachedStore.shutdown();
@@ -1702,6 +1705,7 @@ private TableAndColStats createUnpartitionedTableObjectWithColStats(Database db)
     stats.setStatsDesc(statsDesc);
     stats.setStatsObj(colStatObjList);
+    stats.setEngine(CacheUtils.HIVE_ENGINE);

     return new TableAndColStats(tbl, stats);
   }
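
Editor's note, illustrative only and not part of the patch: the sketch below shows how a caller might exercise the engine-scoped statistics API after this change, mirroring the calls made by the updated tests. It assumes an already-connected IMetaStoreClient; the database, table, and column names are hypothetical.

    // A minimal sketch, not part of the patch. Assumes `client` is connected
    // to a metastore that includes this change; "mydb", "mytable" and "mycol"
    // are hypothetical names introduced for illustration.
    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
    import org.apache.thrift.TException;

    public class EngineScopedStatsSketch {
      // Same engine identifier the tests in this patch use.
      private static final String ENGINE = "hive";

      static void demo(IMetaStoreClient client) throws TException {
        // Reads are now scoped to the engine that produced the statistics.
        List<ColumnStatisticsObj> objs = client.getTableColumnStatistics(
            "mydb", "mytable", Collections.singletonList("mycol"), ENGINE);

        // Writes must tag the statistics object with its engine, as the
        // updated tests do via ColumnStatistics.setEngine(...).
        ColumnStatistics colStats = new ColumnStatistics();
        colStats.setEngine(ENGINE);
        // ... populate statsDesc and statsObj as before ...
        client.updateTableColumnStatistics(colStats);

        // Deletes likewise target a single engine's statistics; per the
        // updated test, a null column name drops all column stats for the table.
        client.deleteTableColumnStatistics("mydb", "mytable", "mycol", ENGINE);
      }
    }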