diff --git a/beeline/pom.xml b/beeline/pom.xml
index 19ec53eba6..0bf065d802 100644
--- a/beeline/pom.xml
+++ b/beeline/pom.xml
@@ -105,6 +105,12 @@
       <classifier>tests</classifier>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hive.hcatalog</groupId>
+      <artifactId>hive-hcatalog-server-extensions</artifactId>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-service</artifactId>
diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
index e6113942dd..169041349c 100644
--- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
+++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
@@ -759,7 +759,8 @@ public void onUpdateTableColumnStat(UpdateTableColumnStatEvent updateTableColumn
.buildUpdateTableColumnStatMessage(updateTableColumnStatEvent.getColStats(),
updateTableColumnStatEvent.getTableObj(),
updateTableColumnStatEvent.getTableParameters(),
- updateTableColumnStatEvent.getWriteId());
+ updateTableColumnStatEvent.getWriteId(),
+ updateTableColumnStatEvent.getWriteIds());
NotificationEvent event = new NotificationEvent(0, now(), EventType.UPDATE_TABLE_COLUMN_STAT.toString(),
msgEncoder.getSerializer().serialize(msg));
ColumnStatisticsDesc statDesc = updateTableColumnStatEvent.getColStats().getStatsDesc();
@@ -789,7 +790,8 @@ public void onUpdatePartitionColumnStat(UpdatePartitionColumnStatEvent updatePar
updatePartColStatEvent.getPartVals(),
updatePartColStatEvent.getPartParameters(),
updatePartColStatEvent.getTableObj(),
- updatePartColStatEvent.getWriteId());
+ updatePartColStatEvent.getWriteId(),
+ updatePartColStatEvent.getWriteIds());
NotificationEvent event = new NotificationEvent(0, now(), EventType.UPDATE_PARTITION_COLUMN_STAT.toString(),
msgEncoder.getSerializer().serialize(msg));
ColumnStatisticsDesc statDesc = updatePartColStatEvent.getPartColStats().getStatsDesc();
diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
index efafe0c641..afa17613fa 100644
--- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
+++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
@@ -208,7 +208,7 @@ public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
Configuration conf = handler.getConf();
Table newTbl;
try {
- newTbl = handler.get_table_core(tbl.getCatName(), tbl.getDbName(), tbl.getTableName())
+ newTbl = handler.get_table_core(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), null)
.deepCopy();
newTbl.getParameters().put(
HCatConstants.HCAT_MSGBUS_TOPIC_NAME,
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 0212e076cd..0e1df69656 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -263,11 +263,6 @@ public boolean dropTable(String catName, String dbName, String tableName)
}
}
- @Override
- public Table getTable(String catName, String dbName, String tableName) throws MetaException {
- return objectStore.getTable(catName, dbName, tableName);
- }
-
@Override
public Table getTable(String catName, String dbName, String tableName,
String writeIdList) throws MetaException {
@@ -280,12 +275,6 @@ public boolean addPartition(Partition part)
return objectStore.addPartition(part);
}
- @Override
- public Partition getPartition(String catName, String dbName, String tableName, List<String> partVals)
- throws MetaException, NoSuchObjectException {
- return objectStore.getPartition(catName, dbName, tableName, partVals);
- }
-
@Override
public Partition getPartition(String catName, String dbName, String tableName,
List<String> partVals, String writeIdList)
@@ -305,15 +294,15 @@ public boolean dropPartition(String catName, String dbName, String tableName, Li
}
@Override
- public List<Partition> getPartitions(String catName, String dbName, String tableName, int max)
+ public List<Partition> getPartitions(String catName, String dbName, String tableName, int max, String writeIdList)
throws MetaException, NoSuchObjectException {
- return objectStore.getPartitions(catName, dbName, tableName, max);
+ return objectStore.getPartitions(catName, dbName, tableName, max, writeIdList);
}
@Override
public Map<String, String> getPartitionLocations(String catName, String dbName, String tblName,
- String baseLocationToNotShow, int max) {
- return objectStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max);
+ String baseLocationToNotShow, int max, String writeIdList) {
+ return objectStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max, writeIdList);
}
@Override
@@ -378,9 +367,9 @@ public Table alterTable(String catName, String dbName, String name, Table newTab
}
@Override
- public List<String> listPartitionNames(String catName, String dbName, String tblName, short maxParts)
+ public List<String> listPartitionNames(String catName, String dbName, String tblName, short maxParts, String writeIdList)
throws MetaException {
- return objectStore.listPartitionNames(catName, dbName, tblName, maxParts);
+ return objectStore.listPartitionNames(catName, dbName, tblName, maxParts, writeIdList);
}
@Override
@@ -388,7 +377,7 @@ public PartitionValuesResponse listPartitionValues(String catName, String db_nam
String tbl_name, List<FieldSchema> cols,
boolean applyDistinct, String filter,
boolean ascending, List<FieldSchema> order,
- long maxParts) throws MetaException {
+ long maxParts, String writeIdList) throws MetaException {
return null;
}
@@ -416,42 +405,43 @@ public Partition alterPartition(String catName, String dbName, String tblName, L
@Override
public List<Partition> getPartitionsByFilter(String catName, String dbName, String tblName,
- String filter, short maxParts) throws MetaException, NoSuchObjectException {
- return objectStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts);
+ String filter, short maxParts, String writeIdList) throws MetaException, NoSuchObjectException {
+ return objectStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts, writeIdList);
}
@Override
public List<Partition> getPartitionSpecsByFilterAndProjection(Table table,
- GetPartitionsProjectionSpec projectionSpec, GetPartitionsFilterSpec filterSpec)
+ GetPartitionsProjectionSpec projectionSpec, GetPartitionsFilterSpec filterSpec, String writeIdList)
throws MetaException, NoSuchObjectException {
- return objectStore.getPartitionSpecsByFilterAndProjection(table, projectionSpec, filterSpec);
+ return objectStore.getPartitionSpecsByFilterAndProjection(table, projectionSpec, filterSpec, writeIdList);
}
@Override
public int getNumPartitionsByFilter(String catName, String dbName, String tblName,
- String filter) throws MetaException, NoSuchObjectException {
- return objectStore.getNumPartitionsByFilter(catName, dbName, tblName, filter);
+ String filter, String writeIdList) throws MetaException, NoSuchObjectException {
+ return objectStore.getNumPartitionsByFilter(catName, dbName, tblName, filter, writeIdList);
}
@Override
public int getNumPartitionsByExpr(String catName, String dbName, String tblName,
- byte[] expr) throws MetaException, NoSuchObjectException {
- return objectStore.getNumPartitionsByExpr(catName, dbName, tblName, expr);
+ byte[] expr, String writeIdList) throws MetaException, NoSuchObjectException {
+ return objectStore.getNumPartitionsByExpr(catName, dbName, tblName, expr, writeIdList);
}
@Override
public List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
- List<String> partNames)
+ List<String> partNames, String writeIdList)
throws MetaException, NoSuchObjectException {
return objectStore.getPartitionsByNames(
- catName, dbName, tblName, partNames);
+ catName, dbName, tblName, partNames, writeIdList);
}
@Override
public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr,
- String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
+ String defaultPartitionName, short maxParts, List<Partition> result,
+ String writeIdList) throws TException {
return objectStore.getPartitionsByExpr(catName,
- dbName, tblName, expr, defaultPartitionName, maxParts, result);
+ dbName, tblName, expr, defaultPartitionName, maxParts, result, writeIdList);
}
@Override
@@ -622,34 +612,36 @@ public Role getRole(String roleName) throws NoSuchObjectException {
@Override
public Partition getPartitionWithAuth(String catName, String dbName, String tblName,
- List<String> partVals, String userName, List<String> groupNames)
+ List<String> partVals, String userName,
+ List<String> groupNames, String writeIdList)
throws MetaException, NoSuchObjectException, InvalidObjectException {
return objectStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName,
- groupNames);
+ groupNames, writeIdList);
}
@Override
public List<Partition> getPartitionsWithAuth(String catName, String dbName, String tblName,
- short maxParts, String userName, List<String> groupNames)
+ short maxParts, String userName,
+ List<String> groupNames, String writeIdList)
throws MetaException, NoSuchObjectException, InvalidObjectException {
return objectStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName,
- groupNames);
+ groupNames, writeIdList);
}
@Override
public List<String> listPartitionNamesPs(String catName, String dbName, String tblName,
- List<String> partVals, short maxParts)
+ List<String> partVals, short maxParts, String writeIdList)
throws MetaException, NoSuchObjectException {
- return objectStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts);
+ return objectStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts, writeIdList);
}
@Override
public List<Partition> listPartitionsPsWithAuth(String catName, String dbName, String tblName,
List<String> partVals, short maxParts, String userName,
- List<String> groupNames)
+ List<String> groupNames, String writeIdList)
throws MetaException, InvalidObjectException, NoSuchObjectException {
return objectStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts,
- userName, groupNames);
+ userName, groupNames, writeIdList);
}
@Override
@@ -720,12 +712,6 @@ public long cleanupEvents() {
return objectStore.listTableColumnGrantsAll(catName, dbName, tableName, columnName);
}
- @Override
- public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
- List<String> colNames) throws MetaException, NoSuchObjectException {
- return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames);
- }
-
@Override
public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
List<String> colNames,
@@ -817,14 +803,6 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro
}
- @Override
- public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName,
- String tblName, List<String> colNames,
- List<String> partNames)
- throws MetaException, NoSuchObjectException {
- return objectStore.getPartitionColumnStatistics(catName, dbName, tblName , colNames, partNames);
- }
-
@Override
public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName,
String tblName, List<String> colNames,
@@ -837,9 +815,9 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro
@Override
public boolean doesPartitionExist(String catName, String dbName, String tableName,
- List<FieldSchema> partKeys, List<String> partVals)
+ List<FieldSchema> partKeys, List<String> partVals, String writeIdList)
throws MetaException, NoSuchObjectException {
- return objectStore.doesPartitionExist(catName, dbName, tableName, partKeys, partVals);
+ return objectStore.doesPartitionExist(catName, dbName, tableName, partKeys, partVals, writeIdList);
}
@Override
@@ -905,13 +883,6 @@ public Function getFunction(String catName, String dbName, String funcName)
return objectStore.getFunctions(catName, dbName, pattern);
}
- @Override
- public AggrStats get_aggr_stats_for(String catName, String dbName,
- String tblName, List<String> partNames, List<String> colNames)
- throws MetaException {
- return null;
- }
-
@Override
public AggrStats get_aggr_stats_for(String catName, String dbName,
String tblName, List<String> partNames, List<String> colNames,
@@ -1324,5 +1295,4 @@ public int deleteRuntimeStats(int maxRetainSecs) throws MetaException {
NoSuchObjectException {
return null;
}
-
}
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java
index 285f30b008..74fc40232d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java
@@ -10,6 +10,7 @@
import org.apache.hadoop.hive.metastore.*;
import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
import org.apache.hadoop.hive.metastore.api.*;
+import org.apache.hadoop.hive.metastore.cache.CachedStore.MergedColumnStatsForPartitions;
import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
@@ -44,7 +45,6 @@ public void setUp() throws Exception {
MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb");
MetastoreConf.setVar(conf, ConfVars.TRANSACTIONAL_EVENT_LISTENERS, DbNotificationListener.class.getName());
MetastoreConf.setVar(conf, ConfVars.RAW_STORE_IMPL, "org.apache.hadoop.hive.metastore.cache.CachedStore");
- MetastoreConf.setBoolVar(conf, ConfVars.METASTORE_CACHE_CAN_USE_EVENT, true);
MetastoreConf.setBoolVar(conf, ConfVars.HIVE_TXN_STATS_ENABLED, true);
MetastoreConf.setBoolVar(conf, ConfVars.AGGREGATE_STATS_CACHE_ENABLED, false);
MetaStoreTestUtils.setConfForStandloneMode(conf);
@@ -120,84 +120,6 @@ private void comparePartitions(Partition part1, Partition part2) {
Assert.assertEquals(part1.getLastAccessTime(), part2.getLastAccessTime());
}
- @Test
- public void testDatabaseOpsForUpdateUsingEvents() throws Exception {
- RawStore rawStore = hmsHandler.getMS();
-
- // Prewarm CachedStore
- CachedStore.setCachePrewarmedState(false);
- CachedStore.prewarm(rawStore);
-
- // Add a db via rawStore
- String dbName = "testDatabaseOps";
- String dbOwner = "user1";
- Database db = createTestDb(dbName, dbOwner);
-
- hmsHandler.create_database(db);
- db = rawStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
-
- // Read database via CachedStore
- Database dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName);
- Assert.assertEquals(db, dbRead);
-
- // Add another db via rawStore
- final String dbName1 = "testDatabaseOps1";
- Database db1 = createTestDb(dbName1, dbOwner);
- hmsHandler.create_database(db1);
- db1 = rawStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1);
-
- // Read database via CachedStore
- dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName1);
- Assert.assertEquals(db1, dbRead);
-
- // Alter the db via rawStore (can only alter owner or parameters)
- dbOwner = "user2";
- Database newdb = new Database(db);
- newdb.setOwnerName(dbOwner);
- hmsHandler.alter_database(dbName, newdb);
- newdb = rawStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
-
- // Read db via cachedStore
- dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName);
- Assert.assertEquals(newdb, dbRead);
-
- // Add another db via rawStore
- final String dbName2 = "testDatabaseOps2";
- Database db2 = createTestDb(dbName2, dbOwner);
- hmsHandler.create_database(db2);
- db2 = rawStore.getDatabase(DEFAULT_CATALOG_NAME, dbName2);
-
- // Alter db "testDatabaseOps" via rawStore
- dbOwner = "user1";
- newdb = new Database(db);
- newdb.setOwnerName(dbOwner);
- hmsHandler.alter_database(dbName, newdb);
- newdb = rawStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
-
- // Drop db "testDatabaseOps1" via rawStore
- Database dropDb = rawStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1);
- hmsHandler.drop_database(dbName1, true, true);
-
- // Read the newly added db via CachedStore
- dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName2);
- Assert.assertEquals(db2, dbRead);
-
- // Read the altered db via CachedStore (altered user from "user2" to "user1")
- dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName);
- Assert.assertEquals(newdb, dbRead);
-
- // Try to read the dropped db after cache update
- dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName1);
- Assert.assertEquals(null, dbRead);
-
- // Clean up
- hmsHandler.drop_database(dbName, true, true);
- hmsHandler.drop_database(dbName2, true, true);
- sharedCache.getDatabaseCache().clear();
- sharedCache.clearTableCache();
- sharedCache.getSdCache().clear();
- }
-
@Test
public void testTableOpsForUpdateUsingEvents() throws Exception {
long lastEventId = -1;
@@ -205,7 +127,7 @@ public void testTableOpsForUpdateUsingEvents() throws Exception {
// Prewarm CachedStore
CachedStore.setCachePrewarmedState(false);
- CachedStore.prewarm(rawStore);
+ CachedStore.prewarm(rawStore, conf);
// Add a db via rawStore
String dbName = "test_table_ops";
@@ -225,19 +147,17 @@ public void testTableOpsForUpdateUsingEvents() throws Exception {
List<FieldSchema> ptnCols = new ArrayList<>();
Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols);
hmsHandler.create_table(tbl);
- tbl = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+ tbl = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName, null);
- // Read database, table via CachedStore
- Database dbRead= sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName);
- Assert.assertEquals(db, dbRead);
- Table tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName);
+ // Read table via CachedStore
+ Table tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, null);
compareTables(tblRead, tbl);
// Add a new table via rawStore
String tblName2 = "tbl2";
Table tbl2 = createTestTbl(dbName, tblName2, tblOwner, cols, ptnCols);
hmsHandler.create_table(tbl2);
- tbl2 = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName2);
+ tbl2 = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName2, null);
// Alter table "tbl" via rawStore
tblOwner = "role1";
@@ -245,7 +165,7 @@ public void testTableOpsForUpdateUsingEvents() throws Exception {
newTable.setOwner(tblOwner);
newTable.setOwnerType(PrincipalType.ROLE);
hmsHandler.alter_table(dbName, tblName, newTable);
- newTable = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+ newTable = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName, null);
Assert.assertEquals("Owner of the table did not change.", tblOwner, newTable.getOwner());
Assert.assertEquals("Owner type of the table did not change", PrincipalType.ROLE, newTable.getOwnerType());
@@ -254,23 +174,22 @@ public void testTableOpsForUpdateUsingEvents() throws Exception {
hmsHandler.drop_table(dbName, tblName2, true);
// Read the altered "tbl" via CachedStore
- tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName);
+ tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, null);
compareTables(tblRead, newTable);
// Try to read the dropped "tbl2" via CachedStore (should throw exception)
- tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName2);
+ tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName2, null);
Assert.assertNull(tblRead);
// Clean up
hmsHandler.drop_database(dbName, true, true);
- tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName2);
+ tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName2, null);
Assert.assertNull(tblRead);
- tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName);
+ tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, null);
Assert.assertNull(tblRead);
- sharedCache.getDatabaseCache().clear();
sharedCache.clearTableCache();
sharedCache.getSdCache().clear();
}
@@ -282,7 +201,7 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception {
// Prewarm CachedStore
CachedStore.setCachePrewarmedState(false);
- CachedStore.prewarm(rawStore);
+ CachedStore.prewarm(rawStore, conf);
// Add a db via rawStore
String dbName = "test_partition_ops";
@@ -304,7 +223,7 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception {
ptnCols.add(ptnCol1);
Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols);
hmsHandler.create_table(tbl);
- tbl = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+ tbl = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName, null);
final String ptnColVal1 = "aaa";
Map<String, String> partParams = new HashMap<>();
@@ -313,7 +232,7 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception {
0, tbl.getSd(), partParams);
ptn1.setCatName(DEFAULT_CATALOG_NAME);
hmsHandler.add_partition(ptn1);
- ptn1 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1));
+ ptn1 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1), null);
final String ptnColVal2 = "bbb";
Partition ptn2 =
@@ -321,13 +240,10 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception {
0, tbl.getSd(), partParams);
ptn2.setCatName(DEFAULT_CATALOG_NAME);
hmsHandler.add_partition(ptn2);
- ptn2 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2));
+ ptn2 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2), null);
- // Read database, table, partition via CachedStore
- Database dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME.toLowerCase(), dbName.toLowerCase());
- Assert.assertEquals(db, dbRead);
Table tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME.toLowerCase(),
- dbName.toLowerCase(), tblName.toLowerCase());
+ dbName.toLowerCase(), tblName.toLowerCase(), null);
compareTables(tbl, tblRead);
Partition ptn1Read = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME.toLowerCase(),
dbName.toLowerCase(), tblName.toLowerCase(), Arrays.asList(ptnColVal1));
@@ -343,20 +259,20 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception {
0, tbl.getSd(), partParams);
ptn3.setCatName(DEFAULT_CATALOG_NAME);
hmsHandler.add_partition(ptn3);
- ptn3 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3));
+ ptn3 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3), null);
// Alter an existing partition ("aaa") via rawStore
- ptn1 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1));
+ ptn1 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1), null);
final String ptnColVal1Alt = "aaa";
Partition ptn1Atl =
new Partition(Arrays.asList(ptnColVal1Alt), dbName, tblName, 0,
0, tbl.getSd(), partParams);
ptn1Atl.setCatName(DEFAULT_CATALOG_NAME);
hmsHandler.alter_partitions(dbName, tblName, Arrays.asList(ptn1Atl));
- ptn1Atl = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt));
+ ptn1Atl = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt), null);
// Drop an existing partition ("bbb") via rawStore
- Partition ptnDrop = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2));
+ Partition ptnDrop = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2), null);
hmsHandler.drop_partition(dbName, tblName, Arrays.asList(ptnColVal2), false);
// Read the newly added partition via CachedStore
@@ -382,13 +298,12 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception {
// Clean up
rawStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName);
- sharedCache.getDatabaseCache().clear();
sharedCache.clearTableCache();
sharedCache.getSdCache().clear();
}
- private void updateTableColStats(String dbName, String tblName, String[] colName,
- double highValue, double avgColLen, boolean isTxnTable) throws Throwable {
+ private long updateTableColStats(String dbName, String tblName, String[] colName,
+ double highValue, double avgColLen, boolean isTxnTable, long lastEventId) throws Throwable {
long writeId = -1;
String validWriteIds = null;
if (isTxnTable) {
@@ -412,6 +327,7 @@ private void updateTableColStats(String dbName, String tblName, String[] colName
// write stats objs persistently
hmsHandler.update_table_column_statistics_req(setTblColStat);
+ lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, null);
validateTablePara(dbName, tblName);
ColumnStatistics colStatsCache = sharedCache.getTableColStatsFromCache(DEFAULT_CATALOG_NAME,
@@ -423,10 +339,11 @@ private void updateTableColStats(String dbName, String tblName, String[] colName
dbName, tblName, Lists.newArrayList(colName[1]), validWriteIds, true);
Assert.assertEquals(colStatsCache.getStatsObj().get(0).getColName(), colName[1]);
verifyStatString(colStatsCache.getStatsObj().get(0), colName[1], avgColLen);
+ return lastEventId;
}
- private void updatePartColStats(String dbName, String tblName, boolean isTxnTable, String[] colName,
- String partName, double highValue, double avgColLen) throws Throwable {
+ private long updatePartColStats(String dbName, String tblName, boolean isTxnTable, String[] colName,
+ String partName, double highValue, double avgColLen, long lastEventId) throws Throwable {
long writeId = -1;
String validWriteIds = null;
List<Long> txnIds = null;
@@ -471,7 +388,7 @@ private void updatePartColStats(String dbName, String tblName, boolean isTxnTabl
} else {
Assert.assertEquals(statRowStore.get(0).isIsStatsCompliant(), false);
}
-
+ lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf);
List<ColumnStatistics> statSharedCache = sharedCache.getPartitionColStatsListFromCache(DEFAULT_CATALOG_NAME,
dbName, tblName, Collections.singletonList(partName), Collections.singletonList(colName[1]),
validWriteIds, true);
@@ -489,6 +406,8 @@ private void updatePartColStats(String dbName, String tblName, boolean isTxnTabl
statPartCache = sharedCache.getPartitionColStatsFromCache(DEFAULT_CATALOG_NAME, dbName, tblName,
CachedStore.partNameToVals(partName), colName[1], validWriteIds);
verifyStatString(statPartCache.getColumnStatisticsObj(), colName[1], avgColLen);
+
+ return lastEventId;
}
private List<ColumnStatisticsObj> getStatsObjects(String dbName, String tblName, String[] colName,
@@ -572,7 +491,7 @@ private void setUpBeforeTest(String dbName, String tblName, String[] colName, bo
// Prewarm CachedStore
CachedStore.setCachePrewarmedState(false);
- CachedStore.prewarm(rawStore);
+ CachedStore.prewarm(rawStore, conf);
// Add a db via rawStore
Database db = createTestDb(dbName, dbOwner);
@@ -670,8 +589,8 @@ private String getValidWriteIds(String dbName, String tblName) throws Throwable
}
private void validateTablePara(String dbName, String tblName) throws Throwable {
- Table tblRead = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
- Table tblRead1 = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName);
+ Table tblRead = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName, null);
+ Table tblRead1 = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, null);
Assert.assertEquals(tblRead.getParameters(), tblRead1.getParameters());
}
@@ -706,18 +625,19 @@ private void testTableColStatInternal(String dbName, String tblName, boolean isT
String[] colName = new String[]{"income", "name"};
double highValue = 1200000.4525;
double avgColLen = 50.30;
+ long lastEventId = 0;
setUpBeforeTest(dbName, tblName, colName, isTxnTable);
- updateTableColStats(dbName, tblName, colName, highValue, avgColLen, isTxnTable);
+ lastEventId = updateTableColStats(dbName, tblName, colName, highValue, avgColLen, isTxnTable, lastEventId);
if (!isTxnTable) {
deleteColStats(dbName, tblName, colName);
}
tblName = "tbl_part";
createTableWithPart(dbName, tblName, colName, isTxnTable);
- List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1);
+ List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1, null);
String partName = partitions.get(0);
- updatePartColStats(dbName, tblName, isTxnTable, colName, partName, highValue, avgColLen);
+ lastEventId = updatePartColStats(dbName, tblName, isTxnTable, colName, partName, highValue, avgColLen, lastEventId);
if (!isTxnTable) {
deletePartColStats(dbName, tblName, colName, partName);
}
@@ -747,11 +667,12 @@ public void testTableColumnStatisticsTxnTableMulti() throws Throwable {
setUpBeforeTest(dbName, null, colName, true);
createTableWithPart(dbName, tblName, colName, true);
- List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1);
+ List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1, null);
String partName = partitions.get(0);
- updatePartColStats(dbName, tblName, true, colName, partName, highValue, avgColLen);
- updatePartColStats(dbName, tblName, true, colName, partName, 1200000.4521, avgColLen);
- updatePartColStats(dbName, tblName, true, colName, partName, highValue, 34.78);
+ long lastEventId = 0;
+ lastEventId = updatePartColStats(dbName, tblName, true, colName, partName, highValue, avgColLen, lastEventId);
+ lastEventId = updatePartColStats(dbName, tblName, true, colName, partName, 1200000.4521, avgColLen, lastEventId);
+ lastEventId = updatePartColStats(dbName, tblName, true, colName, partName, highValue, 34.78, lastEventId);
}
@Test
@@ -761,10 +682,11 @@ public void testTableColumnStatisticsTxnTableMultiAbort() throws Throwable {
String[] colName = new String[]{"income", "name"};
double highValue = 1200000.4525;
double avgColLen = 50.30;
+ long lastEventId = 0;
setUpBeforeTest(dbName, null, colName, true);
createTableWithPart(dbName, tblName, colName, true);
- List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1);
+ List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1, null);
String partName = partitions.get(0);
List<Long> txnIds = allocateTxns(1);
@@ -804,6 +726,7 @@ public void testTableColumnStatisticsTxnTableMultiAbort() throws Throwable {
verifyStat(statRawStore.get(0).getStatsObj(), colName, highValue, avgColLen);
Assert.assertEquals(statRawStore.get(0).isIsStatsCompliant(), false);
+ lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf);
List<ColumnStatistics> statsListFromCache = sharedCache.getPartitionColStatsListFromCache(DEFAULT_CATALOG_NAME,
dbName, tblName, Collections.singletonList(partName), Collections.singletonList(colName[1]),
validWriteIds, true);
@@ -824,14 +747,15 @@ public void testTableColumnStatisticsTxnTableOpenTxn() throws Throwable {
String[] colName = new String[]{"income", "name"};
double highValue = 1200000.4121;
double avgColLen = 23.30;
+ long lastEventId = 0;
setUpBeforeTest(dbName, null, colName, true);
createTableWithPart(dbName, tblName, colName, true);
- List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1);
+ List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1, null);
String partName = partitions.get(0);
// update part col stats successfully.
- updatePartColStats(dbName, tblName, true, colName, partName, 1.2, 12.2);
+ lastEventId = updatePartColStats(dbName, tblName, true, colName, partName, 1.2, 12.2, lastEventId);
List<Long> txnIds = allocateTxns(1);
long writeId = allocateWriteIds(txnIds, dbName, tblName).get(0).getWriteId();
@@ -854,6 +778,7 @@ public void testTableColumnStatisticsTxnTableOpenTxn() throws Throwable {
// write stats objs persistently
hmsHandler.update_partition_column_statistics_req(setTblColStat);
+ lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf);
// keep the txn open and verify that the stats got is not compliant.
@@ -904,9 +829,9 @@ private void verifyAggrStat(String dbName, String tblName, String[] colName, Lis
Assert.assertEquals(aggrStatsCached, aggrStats);
//Assert.assertEquals(aggrStatsCached.isIsStatsCompliant(), true);
- List<ColumnStatisticsObj> stats = sharedCache.getAggrStatsFromCache(DEFAULT_CATALOG_NAME, dbName, tblName,
- Collections.singletonList(colName[0]), SharedCache.StatsType.ALL);
- Assert.assertEquals(stats.get(0).getStatsData().getDoubleStats().getHighValue(), highValue, 0.01);
+ MergedColumnStatsForPartitions stats = CachedStore.mergeColStatsForPartitions(DEFAULT_CATALOG_NAME, dbName, tblName, Lists.newArrayList("income=1", "income=2"),
+ Collections.singletonList(colName[0]), sharedCache, SharedCache.StatsType.ALL, validWriteIds, false, 0.0);
+ Assert.assertEquals(stats.colStats.get(0).getStatsData().getDoubleStats().getHighValue(), highValue, 0.01);
}
@Test
@@ -917,15 +842,17 @@ public void testAggrStat() throws Throwable {
setUpBeforeTest(dbName, null, colName, false);
createTableWithPart(dbName, tblName, colName, false);
- List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short) -1);
+ List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short) -1, null);
String partName = partitions.get(0);
// update part col stats successfully.
- updatePartColStats(dbName, tblName, false, colName, partitions.get(0), 2, 12);
- updatePartColStats(dbName, tblName, false, colName, partitions.get(1), 4, 10);
+ long lastEventId = 0;
+ lastEventId = updatePartColStats(dbName, tblName, false, colName, partitions.get(0), 2, 12, lastEventId);
+ lastEventId = updatePartColStats(dbName, tblName, false, colName, partitions.get(1), 4, 10, lastEventId);
+ lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf);
verifyAggrStat(dbName, tblName, colName, partitions, false, 4);
- updatePartColStats(dbName, tblName, false, colName, partitions.get(1), 3, 10);
+ lastEventId = updatePartColStats(dbName, tblName, false, colName, partitions.get(1), 3, 10, lastEventId);
verifyAggrStat(dbName, tblName, colName, partitions, false, 3);
}
@@ -934,18 +861,19 @@ public void testAggrStatTxnTable() throws Throwable {
String dbName = "aggr_stats_test_db_txn";
String tblName = "tbl_part";
String[] colName = new String[]{"income", "name"};
+ long lastEventId = 0;
setUpBeforeTest(dbName, null, colName, true);
createTableWithPart(dbName, tblName, colName, true);
- List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1);
+ List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1, null);
String partName = partitions.get(0);
// update part col stats successfully.
- updatePartColStats(dbName, tblName, true, colName, partitions.get(0), 2, 12);
- updatePartColStats(dbName, tblName, true, colName, partitions.get(1), 4, 10);
+ lastEventId = updatePartColStats(dbName, tblName, true, colName, partitions.get(0), 2, 12, lastEventId);
+ lastEventId = updatePartColStats(dbName, tblName, true, colName, partitions.get(1), 4, 10, lastEventId);
verifyAggrStat(dbName, tblName, colName, partitions, true, 4);
- updatePartColStats(dbName, tblName, true, colName, partitions.get(1), 3, 10);
+ lastEventId = updatePartColStats(dbName, tblName, true, colName, partitions.get(1), 3, 10, lastEventId);
verifyAggrStat(dbName, tblName, colName, partitions, true, 3);
List<Long> txnIds = allocateTxns(1);
@@ -988,15 +916,16 @@ public void testAggrStatAbortTxn() throws Throwable {
String dbName = "aggr_stats_test_db_txn_abort";
String tblName = "tbl_part";
String[] colName = new String[]{"income", "name"};
+ long lastEventId = 0;
setUpBeforeTest(dbName, null, colName, true);
createTableWithPart(dbName, tblName, colName, true);
- List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1);
+ List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1, null);
String partName = partitions.get(0);
// update part col stats successfully.
- updatePartColStats(dbName, tblName, true, colName, partitions.get(0), 2, 12);
- updatePartColStats(dbName, tblName, true, colName, partitions.get(1), 4, 10);
+ lastEventId = updatePartColStats(dbName, tblName, true, colName, partitions.get(0), 2, 12, lastEventId);
+ lastEventId = updatePartColStats(dbName, tblName, true, colName, partitions.get(1), 4, 10, lastEventId);
verifyAggrStat(dbName, tblName, colName, partitions, true, 4);
List<Long> txnIds = allocateTxns(4);
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
index 61be5a3a5b..6ab6574fc9 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
@@ -1699,6 +1699,9 @@ public boolean isWriteIdAborted(long writeid) {
public RangeResponse isWriteIdRangeAborted(long minWriteId, long maxWriteId) {
return RangeResponse.ALL;
}
+
+ @Override
+ public void commitWriteId(long writeId) {};
};
OrcInputFormat aif = new OrcInputFormat();
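Note: the no-op commitWriteId override added above implies that ValidWriteIdList gains a commitWriteId(long) method in this patch (the interface change itself is not shown in this excerpt); AcidUtils.advanceWriteId below calls it to mark a freshly allocated write id as committed for auto-committed DDL. An assumed sketch of that addition:

    // Assumed new method on org.apache.hadoop.hive.common.ValidWriteIdList (not shown in this diff):
    // marks the given write id as committed in the in-memory snapshot.
    void commitWriteId(long writeId);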
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index d2c2ccd5ea..017db09c10 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -157,8 +157,9 @@ public void initConf() throws Exception {
}
// Plug verifying metastore in for testing DirectSQL.
- conf.setVar(ConfVars.METASTORE_RAW_STORE_IMPL, "org.apache.hadoop.hive.metastore.VerifyingObjectStore");
-
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.RAW_STORE_IMPL, "org.apache.hadoop.hive.metastore.cache.CachedStore");
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.TRANSACTIONAL_EVENT_LISTENERS, "org.apache.hive.hcatalog.listener.DbNotificationListener");
+ HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_TXN_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
miniClusters.initConf(conf);
}
@@ -289,6 +290,7 @@ public void clearTablesCreatedDuringTests() throws Exception {
conf.set("hive.metastore.filter.hook", "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl");
db = Hive.get(conf);
+ SessionState.get().initTxnMgr(conf);
// First delete any MVs to avoid race conditions
for (String dbName : db.getAllDatabases()) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index ae622c8be5..cc86799a32 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -1738,35 +1738,38 @@ public void releaseLocksAndCommitOrRollback(boolean commit, HiveTxnManager txnMa
}
// If we've opened a transaction we need to commit or rollback rather than explicitly
// releasing the locks.
- conf.unset(ValidTxnList.VALID_TXNS_KEY);
- conf.unset(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY);
if(!checkConcurrency()) {
return;
}
- if (txnMgr.isTxnOpen()) {
- if (commit) {
- if(conf.getBoolVar(ConfVars.HIVE_IN_TEST) && conf.getBoolVar(ConfVars.HIVETESTMODEROLLBACKTXN)) {
+ try {
+ if (txnMgr.isTxnOpen()) {
+ if (commit) {
+ if(conf.getBoolVar(ConfVars.HIVE_IN_TEST) && conf.getBoolVar(ConfVars.HIVETESTMODEROLLBACKTXN)) {
+ txnMgr.rollbackTxn();
+ }
+ else {
+ txnMgr.commitTxn();//both commit & rollback clear ALL locks for this tx
+ }
+ } else {
txnMgr.rollbackTxn();
}
- else {
- txnMgr.commitTxn();//both commit & rollback clear ALL locks for this tx
- }
} else {
- txnMgr.rollbackTxn();
+ //since there is no tx, we only have locks for current query (if any)
+ if (ctx != null && ctx.getHiveLocks() != null) {
+ hiveLocks.addAll(ctx.getHiveLocks());
+ }
+ txnMgr.releaseLocks(hiveLocks);
}
- } else {
- //since there is no tx, we only have locks for current query (if any)
- if (ctx != null && ctx.getHiveLocks() != null) {
- hiveLocks.addAll(ctx.getHiveLocks());
+ } finally {
+ hiveLocks.clear();
+ if (ctx != null) {
+ ctx.setHiveLocks(null);
}
- txnMgr.releaseLocks(hiveLocks);
- }
- hiveLocks.clear();
- if (ctx != null) {
- ctx.setHiveLocks(null);
- }
- perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.RELEASE_LOCKS);
+ conf.unset(ValidTxnList.VALID_TXNS_KEY);
+ conf.unset(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY);
+ perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.RELEASE_LOCKS);
+ }
}
/**
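Note: the releaseLocksAndCommitOrRollback rewrite above moves clearing of the transaction snapshot keys from the method prologue into a finally block, so the valid-txn/write-id lists remain visible to commit-time code and are still cleared even when commit or rollback throws. A minimal sketch of the resulting control flow (names as in the hunk above):

    try {
      // commit or roll back the open transaction, or release per-query locks when no txn is open
    } finally {
      hiveLocks.clear();
      if (ctx != null) {
        ctx.setHiveLocks(null);
      }
      conf.unset(ValidTxnList.VALID_TXNS_KEY);
      conf.unset(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY);
      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.RELEASE_LOCKS);
    }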
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java
index 267f7d041f..1077421ac4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java
@@ -21,6 +21,8 @@
import java.util.Map;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.HiveOperation;
import org.apache.hadoop.hive.ql.session.LineageState;
import org.apache.hadoop.hive.ql.session.SessionState;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 295fe7cbd0..c655db26f6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -1969,6 +1969,45 @@ public static TableSnapshot getTableSnapshot(Configuration conf,
validWriteIdList != null ? validWriteIdList.toString() : null);
}
+ /**
+ * This is called by Hive.java for all write operations (DDL). Advance write id
+ * for the table via transaction manager, and store it in config. The write id
+ * will be marked as committed instantly in config, as all DDL are auto
+ * committed, there's no chance to rollback.
+ */
+ public static ValidWriteIdList advanceWriteId(HiveConf conf, Table tbl) throws LockException {
+ if (!isTransactionalTable(tbl)) {
+ return null;
+ }
+ if (SessionState.get().getTxnMgr() == null) {
+ SessionState.get().initTxnMgr(conf);
+ }
+ HiveTxnManager txnMgr = SessionState.get().getTxnMgr();
+ long writeId = txnMgr.getTableWriteId(tbl.getDbName(), tbl.getTableName());
+ List<String> txnTables = new ArrayList<>();
+ String fullTableName = getFullTableName(tbl.getDbName(), tbl.getTableName());
+ txnTables.add(fullTableName);
+ ValidTxnWriteIdList txnWriteIds;
+ if (conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY) != null) {
+ txnWriteIds = new ValidTxnWriteIdList(conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY));
+ } else {
+ String txnString;
+ if (conf.get(ValidTxnList.VALID_TXNS_KEY) != null) {
+ txnString = conf.get(ValidTxnList.VALID_TXNS_KEY);
+ } else {
+ ValidTxnList txnIds = txnMgr.getValidTxns();
+ txnString = txnIds.toString();
+ }
+ txnWriteIds = txnMgr.getValidWriteIds(txnTables, txnString);
+ }
+ ValidWriteIdList writeIds = txnWriteIds.getTableValidWriteIdList(fullTableName);
+ if (writeIds != null) {
+ writeIds.commitWriteId(writeId);
+ conf.set(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY, txnWriteIds.toString());
+ }
+ return writeIds;
+ }
+
/**
* Returns ValidWriteIdList for the table with the given "dbName" and "tableName".
* This is called when HiveConf has no list for the table.
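Note: advanceWriteId is the hook that the Hive.java hunks below invoke before metastore DDL calls, always with the same guard shape. A minimal sketch of that call pattern, using the dropConstraint site shown further down as the example:

    // Sketch of the guard repeated before DDL metastore calls in Hive.java below. DDL is
    // auto-committed, so advanceWriteId immediately marks the new write id committed and
    // publishes it to the conf via VALID_TABLES_WRITEIDS_KEY.
    Table tbl = getTable(dbName, tableName);
    if (AcidUtils.isTransactionalTable(tbl)) {
      // Advance writeId for ddl on transactional table
      AcidUtils.advanceWriteId(conf, tbl);
    }
    getMSC().dropConstraint(dbName, tableName, constraintName);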
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 2ae1db57aa..7aae1934a1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -721,6 +721,10 @@ public void alterTable(String catName, String dbName, String tblName, Table newT
replWriteId);
tableSnapshot = new TableSnapshot(replWriteId, writeIds.writeToString());
} else {
+ if (AcidUtils.isTransactionalTable(newTbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, newTbl);
+ }
// Make sure we pass in the names, so we can get the correct snapshot for rename table.
tableSnapshot = AcidUtils.getTableSnapshot(conf, newTbl, dbName, tblName, true);
}
@@ -803,6 +807,12 @@ public void alterPartition(String catName, String dbName, String tblName, Partit
if (environmentContext == null) {
environmentContext = new EnvironmentContext();
}
+
+ if (AcidUtils.isTransactionalTable(newPart.getTable())) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, newPart.getTable());
+ }
+
AcidUtils.TableSnapshot tableSnapshot = null;
if (transactional) {
tableSnapshot = AcidUtils.getTableSnapshot(conf, newPart.getTable(), true);
@@ -851,6 +861,10 @@ public void alterPartitions(String tblName, List newParts,
List newTParts =
new ArrayList();
try {
+ if (AcidUtils.isTransactionalTable(newParts.get(0).getTable())) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, newParts.get(0).getTable());
+ }
AcidUtils.TableSnapshot tableSnapshot = null;
if (transactional) {
tableSnapshot = AcidUtils.getTableSnapshot(conf, newParts.get(0).getTable(), true);
@@ -923,6 +937,10 @@ public void renamePartition(Table tbl, Map oldPartSpec, Partitio
tbl.getTableName()), new long[0], new BitSet(), replWriteId);
tableSnapshot = new TableSnapshot(replWriteId, writeIds.writeToString());
} else {
+ if (AcidUtils.isTransactionalTable(tbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, tbl);
+ }
// Set table snapshot to api.Table to make it persistent.
tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl, true);
}
@@ -1025,6 +1043,10 @@ public void createTable(Table tbl, boolean ifNotExists,
tTbl.setPrivileges(principalPrivs);
}
}
+ if (AcidUtils.isTransactionalTable(tbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, tbl);
+ }
// Set table snapshot to api.Table to make it persistent. A transactional table being
// replicated may have a valid write Id copied from the source. Use that instead of
// crafting one on the replica.
@@ -1145,6 +1167,15 @@ public void dropTable(String dbName, String tableName, boolean deleteData,
public void dropTable(String dbName, String tableName, boolean deleteData,
boolean ignoreUnknownTab, boolean ifPurge) throws HiveException {
try {
+ Table tbl = null;
+ try {
+ tbl = getTable(dbName, tableName);
+ } catch (InvalidTableException e) {
+ }
+ if (tbl != null && AcidUtils.isTransactionalTable(tbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, tbl);
+ }
getMSC().dropTable(dbName, tableName, deleteData, ignoreUnknownTab, ifPurge);
} catch (NoSuchObjectException e) {
if (!ignoreUnknownTab) {
@@ -1174,11 +1205,15 @@ public void dropTable(String dbName, String tableName, boolean deleteData,
public void truncateTable(String dbDotTableName, Map<String, String> partSpec, Long writeId) throws HiveException {
try {
Table table = getTable(dbDotTableName, true);
+
AcidUtils.TableSnapshot snapshot = null;
if (AcidUtils.isTransactionalTable(table)) {
if (writeId <= 0) {
snapshot = AcidUtils.getTableSnapshot(conf, table, true);
} else {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, table);
+
String fullTableName = getFullTableName(table.getDbName(), table.getTableName());
ValidWriteIdList writeIdList = getMSC().getValidWriteIds(fullTableName, writeId);
snapshot = new TableSnapshot(writeId, writeIdList.writeToString());
@@ -2003,6 +2038,10 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par
inheritLocation, isSkewedStoreAsSubdir, isSrcLocal, isAcidIUDoperation,
resetStatistics, writeId, stmtId, isInsertOverwrite, isTxnTable, newFiles);
+ if (AcidUtils.isTransactionalTable(tbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, tbl);
+ }
AcidUtils.TableSnapshot tableSnapshot = isTxnTable ? getTableSnapshot(tbl, writeId) : null;
if (tableSnapshot != null) {
newTPart.getTPartition().setWriteId(tableSnapshot.getWriteId());
@@ -2693,6 +2732,10 @@ private void constructOneLBLocationMap(FileStatus fSta,
}
boolean isTxnTable = AcidUtils.isTransactionalTable(tbl);
+ if (isTxnTable) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, tbl);
+ }
AcidUtils.TableSnapshot tableSnapshot = isTxnTable ? getTableSnapshot(tbl, writeId) : null;
for (Entry entry : partitionDetailsMap.entrySet()) {
@@ -2991,6 +3034,10 @@ public Partition createPartition(Table tbl, Map partSpec) throws
try {
org.apache.hadoop.hive.metastore.api.Partition part =
Partition.createMetaPartitionObject(tbl, partSpec, null);
+ if (AcidUtils.isTransactionalTable(tbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, tbl);
+ }
AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
part.setWriteId(tableSnapshot != null ? tableSnapshot.getWriteId() : 0);
return new Partition(tbl, getMSC().add_partition(part));
@@ -3022,6 +3069,10 @@ public Partition createPartition(Table tbl, Map partSpec) throws
tbl.getTableName()),
new long[0], new BitSet(), writeId).writeToString();
} else {
+ if (AcidUtils.isTransactionalTable(tbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, tbl);
+ }
AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl, true);
if (tableSnapshot != null && tableSnapshot.getWriteId() > 0) {
writeId = tableSnapshot.getWriteId();
@@ -3430,6 +3481,11 @@ public boolean dropPartition(String db_name, String tbl_name,
public boolean dropPartition(String dbName, String tableName, List<String> partVals, PartitionDropOptions options)
throws HiveException {
try {
+ Table tbl = getTable(dbName, tableName);
+ if (AcidUtils.isTransactionalTable(tbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, tbl);
+ }
return getMSC().dropPartition(dbName, tableName, partVals, options);
} catch (NoSuchObjectException e) {
throw new HiveException("Partition or table doesn't exist.", e);
@@ -3539,6 +3595,10 @@ public boolean dropPartition(String dbName, String tableName, List partV
throws HiveException {
try {
Table tbl = getTable(dbName, tblName);
+ if (AcidUtils.isTransactionalTable(tbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, tbl);
+ }
List> partExprs =
new ArrayList<>(partSpecs.size());
for (AlterTableDropPartitionDesc.PartitionDesc partSpec : partSpecs) {
@@ -4983,6 +5043,16 @@ public static boolean isHadoop1() {
String sourceDb, String sourceTable, String destDb,
String destinationTableName) throws HiveException {
try {
+ Table srcTbl = getTable(sourceDb, sourceTable);
+ if (AcidUtils.isTransactionalTable(srcTbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, srcTbl);
+ }
+ Table descTbl = getTable(destDb, destinationTableName);
+ if (AcidUtils.isTransactionalTable(descTbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, descTbl);
+ }
List partitions =
getMSC().exchange_partitions(partitionSpecs, sourceDb, sourceTable, destDb,
destinationTableName);
@@ -5218,6 +5288,11 @@ public AggrStats getAggrColStatsFor(String dbName, String tblName,
public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName)
throws HiveException {
try {
+ Table tbl = getTable(dbName, tableName);
+ if (AcidUtils.isTransactionalTable(tbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, tbl);
+ }
return getMSC().deleteTableColumnStatistics(dbName, tableName, colName);
} catch(Exception e) {
LOG.debug(StringUtils.stringifyException(e));
@@ -5228,6 +5303,11 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri
public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName,
String colName) throws HiveException {
try {
+ Table tbl = getTable(dbName, tableName);
+ if (AcidUtils.isTransactionalTable(tbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, tbl);
+ }
return getMSC().deletePartitionColumnStatistics(dbName, tableName, partName, colName);
} catch(Exception e) {
LOG.debug(StringUtils.stringifyException(e));
@@ -5477,6 +5557,11 @@ public void cacheFileMetadata(
public void dropConstraint(String dbName, String tableName, String constraintName)
throws HiveException, NoSuchObjectException {
try {
+ Table tbl = getTable(dbName, tableName);
+ if (AcidUtils.isTransactionalTable(tbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, tbl);
+ }
getMSC().dropConstraint(dbName, tableName, constraintName);
} catch (NoSuchObjectException e) {
throw e;
@@ -5808,6 +5893,11 @@ public CheckConstraint getCheckConstraints(String dbName, String tblName)
public void addPrimaryKey(List<SQLPrimaryKey> primaryKeyCols)
throws HiveException, NoSuchObjectException {
try {
+ Table tbl = getTable(primaryKeyCols.get(0).getTable_db(), primaryKeyCols.get(0).getTable_name());
+ if (AcidUtils.isTransactionalTable(tbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, tbl);
+ }
getMSC().addPrimaryKey(primaryKeyCols);
} catch (Exception e) {
throw new HiveException(e);
@@ -5817,6 +5907,11 @@ public void addPrimaryKey(List primaryKeyCols)
public void addForeignKey(List<SQLForeignKey> foreignKeyCols)
throws HiveException, NoSuchObjectException {
try {
+ Table tbl = getTable(foreignKeyCols.get(0).getFktable_db(), foreignKeyCols.get(0).getFktable_name());
+ if (AcidUtils.isTransactionalTable(tbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, tbl);
+ }
getMSC().addForeignKey(foreignKeyCols);
} catch (Exception e) {
throw new HiveException(e);
@@ -5826,6 +5921,11 @@ public void addForeignKey(List foreignKeyCols)
public void addUniqueConstraint(List<SQLUniqueConstraint> uniqueConstraintCols)
throws HiveException, NoSuchObjectException {
try {
+ Table tbl = getTable(uniqueConstraintCols.get(0).getTable_db(), uniqueConstraintCols.get(0).getTable_name());
+ if (AcidUtils.isTransactionalTable(tbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, tbl);
+ }
getMSC().addUniqueConstraint(uniqueConstraintCols);
} catch (Exception e) {
throw new HiveException(e);
@@ -5835,6 +5935,11 @@ public void addUniqueConstraint(List uniqueConstraintCols)
public void addNotNullConstraint(List<SQLNotNullConstraint> notNullConstraintCols)
throws HiveException, NoSuchObjectException {
try {
+ Table tbl = getTable(notNullConstraintCols.get(0).getTable_db(), notNullConstraintCols.get(0).getTable_name());
+ if (AcidUtils.isTransactionalTable(tbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, tbl);
+ }
getMSC().addNotNullConstraint(notNullConstraintCols);
} catch (Exception e) {
throw new HiveException(e);
@@ -5844,6 +5949,11 @@ public void addNotNullConstraint(List notNullConstraintCol
public void addDefaultConstraint(List<SQLDefaultConstraint> defaultConstraints)
throws HiveException, NoSuchObjectException {
try {
+ Table tbl = getTable(defaultConstraints.get(0).getTable_db(), defaultConstraints.get(0).getTable_name());
+ if (AcidUtils.isTransactionalTable(tbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, tbl);
+ }
getMSC().addDefaultConstraint(defaultConstraints);
} catch (Exception e) {
throw new HiveException(e);
@@ -5853,6 +5963,11 @@ public void addDefaultConstraint(List defaultConstraints)
public void addCheckConstraint(List<SQLCheckConstraint> checkConstraints)
throws HiveException, NoSuchObjectException {
try {
+ Table tbl = getTable(checkConstraints.get(0).getTable_db(), checkConstraints.get(0).getTable_name());
+ if (AcidUtils.isTransactionalTable(tbl)) {
+ // Advance writeId for ddl on transactional table
+ AcidUtils.advanceWriteId(conf, tbl);
+ }
getMSC().addCheckConstraint(checkConstraints);
} catch (Exception e) {
throw new HiveException(e);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index d395db1b59..b66b7246e1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -67,10 +67,12 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.common.ObjectPair;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.common.StatsSetupConst.StatDB;
import org.apache.hadoop.hive.common.StringInternUtils;
+import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
@@ -12261,6 +12263,26 @@ else if(ast.getChild(0).getType() == HiveParser.TOK_FALSE) {
}
LOG.info("Completed phase 1 of Semantic Analysis");
+ // Get write IDs for the tables referenced in the query
+ List<String> tabNames = new ArrayList<>();
+ for (String alias : qb.getTabAliases()) {
+ String tabName = qb.getTabNameForAlias(alias);
+ tabName = TableName.fromString(tabName, SessionState.get().getCurrentCatalog(), SessionState.get().getCurrentDatabase()).getDbTable();
+ tabNames.add(tabName);
+ }
+ String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY);
+ if ((txnString == null) || (txnString.isEmpty())) {
+ throw new IllegalStateException("calling getValidWriteIds() without initializing ValidTxnList " +
+ JavaUtils.txnIdToString(getTxnMgr().getCurrentTxnId()));
+ }
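+ // Publish the per-table write ID snapshot to the metastore client(s) so that
+ // metadata calls made during compilation read against this snapshot.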
+ try {
+ ValidTxnWriteIdList txnWriteIds = getTxnMgr().getValidWriteIds(tabNames, txnString);
+ db.getMSC().setValidWriteIdList(txnWriteIds.toString());
+ Hive.get().getMSC().setValidWriteIdList(txnWriteIds.toString());
+ } catch (HiveException|MetaException e) {
+ throw new SemanticException("Failed to fetch valid write IDs", e);
+ }
+
// 5. Resolve Parse Tree
// Materialization is allowed if it is not a view definition
getMetaData(qb, createVwDesc == null);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
index 2cc057ee6e..b4da7d4354 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
@@ -467,7 +467,7 @@ public PartitionWrapper(org.apache.hadoop.hive.metastore.api.Partition mapiPart,
String catName = mapiPart.isSetCatName() ? mapiPart.getCatName() :
MetaStoreUtils.getDefaultCatalog(context.getHandler().getConf());
org.apache.hadoop.hive.metastore.api.Table t = context.getHandler().get_table_core(
- catName, mapiPart.getDbName(), mapiPart.getTableName());
+ catName, mapiPart.getDbName(), mapiPart.getTableName(), null);
if (wrapperApiPart.getSd() == null){
// In the cases of create partition, by the time this event fires, the partition
// object has not yet come into existence, and thus will not yet have a
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java
index 8acb1c54db..fbabf15fd6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java
@@ -213,7 +213,7 @@ private void stopWorkers() {
throws MetaException, NoSuchTxnException, NoSuchObjectException {
if (isAnalyzeTableInProgress(fullTableName)) return null;
String cat = fullTableName.getCat(), db = fullTableName.getDb(), tbl = fullTableName.getTable();
- Table table = rs.getTable(cat, db, tbl);
+ Table table = rs.getTable(cat, db, tbl, null);
LOG.debug("Processing table {}", table);
// Check if the table should be skipped.
@@ -297,7 +297,7 @@ private void stopWorkers() {
try {
colsPerPartition = rs.getPartitionColsWithStats(cat, db, tbl);
partNames = Lists.newArrayList(colsPerPartition.keySet());
- int partitionCount = rs.getNumPartitionsByFilter(cat, db, tbl, "");
+ int partitionCount = rs.getNumPartitionsByFilter(cat, db, tbl, "", null);
isAllParts = partitionCount == partNames.size();
isOk = true;
} finally {
@@ -308,10 +308,10 @@ private void stopWorkers() {
}
}
} else {
- partNames = rs.listPartitionNames(cat, db, tbl, (short) -1);
+ partNames = rs.listPartitionNames(cat, db, tbl, (short) -1, null);
isAllParts = true;
}
- Table t = rs.getTable(cat, db, tbl);
+ Table t = rs.getTable(cat, db, tbl, null);
List currentBatch = null;
int nextBatchStart = 0, nextIxInBatch = -1, currentBatchStart = 0;
List colsToUpdateForAll = null;
@@ -325,7 +325,7 @@ private void stopWorkers() {
currentBatchStart = nextBatchStart;
nextBatchStart = nextBatchEnd;
try {
- currentBatch = rs.getPartitionsByNames(cat, db, tbl, currentNames);
+ currentBatch = rs.getPartitionsByNames(cat, db, tbl, currentNames, null);
} catch (NoSuchObjectException e) {
LOG.error("Failed to get partitions for " + fullTableName + ", skipping some partitions", e);
currentBatch = null;
@@ -444,7 +444,7 @@ private String buildPartColStr(Table table) {
try {
// Note: this should NOT do txn verification - we want to get outdated stats, to
// see if we need to update anything.
- existingStats = rs.getTableColumnStatistics(cat, db, tbl, allCols);
+ existingStats = rs.getTableColumnStatistics(cat, db, tbl, allCols, null);
} catch (NoSuchObjectException e) {
LOG.error("Cannot retrieve existing stats, skipping " + fullTableName, e);
return null;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MetaStoreCompactorThread.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MetaStoreCompactorThread.java
index a6dd4fa003..582c4bfe48 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MetaStoreCompactorThread.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MetaStoreCompactorThread.java
@@ -67,7 +67,7 @@ public void init(AtomicBoolean stop, AtomicBoolean looped) throws Exception {
@Override Table resolveTable(CompactionInfo ci) throws MetaException {
try {
- return rs.getTable(getDefaultCatalog(conf), ci.dbname, ci.tableName);
+ return rs.getTable(getDefaultCatalog(conf), ci.dbname, ci.tableName, null);
} catch (MetaException e) {
LOG.error("Unable to find table " + ci.getFullTableName() + ", " + e.getMessage());
throw e;
@@ -88,7 +88,7 @@ public void init(AtomicBoolean stop, AtomicBoolean looped) throws Exception {
@Override List getPartitionsByNames(CompactionInfo ci) throws MetaException {
try {
return rs.getPartitionsByNames(getDefaultCatalog(conf), ci.dbname, ci.tableName,
- Collections.singletonList(ci.partName));
+ Collections.singletonList(ci.partName), null);
} catch (MetaException e) {
LOG.error("Unable to get partitions by name for CompactionInfo=" + ci);
throw e;
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
index b26586ab62..1219777ceb 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
@@ -816,13 +816,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddDynamicPartition
case 5: // PARTITIONNAMES
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list756 = iprot.readListBegin();
- struct.partitionnames = new ArrayList(_list756.size);
- String _elem757;
- for (int _i758 = 0; _i758 < _list756.size; ++_i758)
+ org.apache.thrift.protocol.TList _list764 = iprot.readListBegin();
+ struct.partitionnames = new ArrayList(_list764.size);
+ String _elem765;
+ for (int _i766 = 0; _i766 < _list764.size; ++_i766)
{
- _elem757 = iprot.readString();
- struct.partitionnames.add(_elem757);
+ _elem765 = iprot.readString();
+ struct.partitionnames.add(_elem765);
}
iprot.readListEnd();
}
@@ -872,9 +872,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddDynamicPartitio
oprot.writeFieldBegin(PARTITIONNAMES_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionnames.size()));
- for (String _iter759 : struct.partitionnames)
+ for (String _iter767 : struct.partitionnames)
{
- oprot.writeString(_iter759);
+ oprot.writeString(_iter767);
}
oprot.writeListEnd();
}
@@ -910,9 +910,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartition
oprot.writeString(struct.tablename);
{
oprot.writeI32(struct.partitionnames.size());
- for (String _iter760 : struct.partitionnames)
+ for (String _iter768 : struct.partitionnames)
{
- oprot.writeString(_iter760);
+ oprot.writeString(_iter768);
}
}
BitSet optionals = new BitSet();
@@ -937,13 +937,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartitions
struct.tablename = iprot.readString();
struct.setTablenameIsSet(true);
{
- org.apache.thrift.protocol.TList _list761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
- struct.partitionnames = new ArrayList(_list761.size);
- String _elem762;
- for (int _i763 = 0; _i763 < _list761.size; ++_i763)
+ org.apache.thrift.protocol.TList _list769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+ struct.partitionnames = new ArrayList(_list769.size);
+ String _elem770;
+ for (int _i771 = 0; _i771 < _list769.size; ++_i771)
{
- _elem762 = iprot.readString();
- struct.partitionnames.add(_elem762);
+ _elem770 = iprot.readString();
+ struct.partitionnames.add(_elem770);
}
}
struct.setPartitionnamesIsSet(true);
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java
index 361332b600..cf8bbd13ec 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java
@@ -716,13 +716,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI
case 3: // TXN_IDS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list682 = iprot.readListBegin();
- struct.txnIds = new ArrayList(_list682.size);
- long _elem683;
- for (int _i684 = 0; _i684 < _list682.size; ++_i684)
+ org.apache.thrift.protocol.TList _list690 = iprot.readListBegin();
+ struct.txnIds = new ArrayList(_list690.size);
+ long _elem691;
+ for (int _i692 = 0; _i692 < _list690.size; ++_i692)
{
- _elem683 = iprot.readI64();
- struct.txnIds.add(_elem683);
+ _elem691 = iprot.readI64();
+ struct.txnIds.add(_elem691);
}
iprot.readListEnd();
}
@@ -742,14 +742,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI
case 5: // SRC_TXN_TO_WRITE_ID_LIST
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list685 = iprot.readListBegin();
- struct.srcTxnToWriteIdList = new ArrayList(_list685.size);
- TxnToWriteId _elem686;
- for (int _i687 = 0; _i687 < _list685.size; ++_i687)
+ org.apache.thrift.protocol.TList _list693 = iprot.readListBegin();
+ struct.srcTxnToWriteIdList = new ArrayList(_list693.size);
+ TxnToWriteId _elem694;
+ for (int _i695 = 0; _i695 < _list693.size; ++_i695)
{
- _elem686 = new TxnToWriteId();
- _elem686.read(iprot);
- struct.srcTxnToWriteIdList.add(_elem686);
+ _elem694 = new TxnToWriteId();
+ _elem694.read(iprot);
+ struct.srcTxnToWriteIdList.add(_elem694);
}
iprot.readListEnd();
}
@@ -786,9 +786,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite
oprot.writeFieldBegin(TXN_IDS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txnIds.size()));
- for (long _iter688 : struct.txnIds)
+ for (long _iter696 : struct.txnIds)
{
- oprot.writeI64(_iter688);
+ oprot.writeI64(_iter696);
}
oprot.writeListEnd();
}
@@ -807,9 +807,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite
oprot.writeFieldBegin(SRC_TXN_TO_WRITE_ID_LIST_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.srcTxnToWriteIdList.size()));
- for (TxnToWriteId _iter689 : struct.srcTxnToWriteIdList)
+ for (TxnToWriteId _iter697 : struct.srcTxnToWriteIdList)
{
- _iter689.write(oprot);
+ _iter697.write(oprot);
}
oprot.writeListEnd();
}
@@ -849,9 +849,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI
if (struct.isSetTxnIds()) {
{
oprot.writeI32(struct.txnIds.size());
- for (long _iter690 : struct.txnIds)
+ for (long _iter698 : struct.txnIds)
{
- oprot.writeI64(_iter690);
+ oprot.writeI64(_iter698);
}
}
}
@@ -861,9 +861,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI
if (struct.isSetSrcTxnToWriteIdList()) {
{
oprot.writeI32(struct.srcTxnToWriteIdList.size());
- for (TxnToWriteId _iter691 : struct.srcTxnToWriteIdList)
+ for (TxnToWriteId _iter699 : struct.srcTxnToWriteIdList)
{
- _iter691.write(oprot);
+ _iter699.write(oprot);
}
}
}
@@ -879,13 +879,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteId
BitSet incoming = iprot.readBitSet(3);
if (incoming.get(0)) {
{
- org.apache.thrift.protocol.TList _list692 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
- struct.txnIds = new ArrayList(_list692.size);
- long _elem693;
- for (int _i694 = 0; _i694 < _list692.size; ++_i694)
+ org.apache.thrift.protocol.TList _list700 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+ struct.txnIds = new ArrayList(_list700.size);
+ long _elem701;
+ for (int _i702 = 0; _i702 < _list700.size; ++_i702)
{
- _elem693 = iprot.readI64();
- struct.txnIds.add(_elem693);
+ _elem701 = iprot.readI64();
+ struct.txnIds.add(_elem701);
}
}
struct.setTxnIdsIsSet(true);
@@ -896,14 +896,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteId
}
if (incoming.get(2)) {
{
- org.apache.thrift.protocol.TList _list695 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
- struct.srcTxnToWriteIdList = new ArrayList(_list695.size);
- TxnToWriteId _elem696;
- for (int _i697 = 0; _i697 < _list695.size; ++_i697)
+ org.apache.thrift.protocol.TList _list703 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+ struct.srcTxnToWriteIdList = new ArrayList(_list703.size);
+ TxnToWriteId _elem704;
+ for (int _i705 = 0; _i705 < _list703.size; ++_i705)
{
- _elem696 = new TxnToWriteId();
- _elem696.read(iprot);
- struct.srcTxnToWriteIdList.add(_elem696);
+ _elem704 = new TxnToWriteId();
+ _elem704.read(iprot);
+ struct.srcTxnToWriteIdList.add(_elem704);
}
}
struct.setSrcTxnToWriteIdListIsSet(true);
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java
index aaf187b4bd..f71f286638 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java
@@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI
case 1: // TXN_TO_WRITE_IDS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list698 = iprot.readListBegin();
- struct.txnToWriteIds = new ArrayList(_list698.size);
- TxnToWriteId _elem699;
- for (int _i700 = 0; _i700 < _list698.size; ++_i700)
+ org.apache.thrift.protocol.TList _list706 = iprot.readListBegin();
+ struct.txnToWriteIds = new ArrayList(_list706.size);
+ TxnToWriteId _elem707;
+ for (int _i708 = 0; _i708 < _list706.size; ++_i708)
{
- _elem699 = new TxnToWriteId();
- _elem699.read(iprot);
- struct.txnToWriteIds.add(_elem699);
+ _elem707 = new TxnToWriteId();
+ _elem707.read(iprot);
+ struct.txnToWriteIds.add(_elem707);
}
iprot.readListEnd();
}
@@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite
oprot.writeFieldBegin(TXN_TO_WRITE_IDS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.txnToWriteIds.size()));
- for (TxnToWriteId _iter701 : struct.txnToWriteIds)
+ for (TxnToWriteId _iter709 : struct.txnToWriteIds)
{
- _iter701.write(oprot);
+ _iter709.write(oprot);
}
oprot.writeListEnd();
}
@@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI
TTupleProtocol oprot = (TTupleProtocol) prot;
{
oprot.writeI32(struct.txnToWriteIds.size());
- for (TxnToWriteId _iter702 : struct.txnToWriteIds)
+ for (TxnToWriteId _iter710 : struct.txnToWriteIds)
{
- _iter702.write(oprot);
+ _iter710.write(oprot);
}
}
}
@@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI
public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdsResponse struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
{
- org.apache.thrift.protocol.TList _list703 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
- struct.txnToWriteIds = new ArrayList(_list703.size);
- TxnToWriteId _elem704;
- for (int _i705 = 0; _i705 < _list703.size; ++_i705)
+ org.apache.thrift.protocol.TList _list711 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+ struct.txnToWriteIds = new ArrayList(_list711.size);
+ TxnToWriteId _elem712;
+ for (int _i713 = 0; _i713 < _list711.size; ++_i713)
{
- _elem704 = new TxnToWriteId();
- _elem704.read(iprot);
- struct.txnToWriteIds.add(_elem704);
+ _elem712 = new TxnToWriteId();
+ _elem712.read(iprot);
+ struct.txnToWriteIds.add(_elem712);
}
}
struct.setTxnToWriteIdsIsSet(true);
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
index eeeae54dd2..30d130df10 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
@@ -877,14 +877,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AlterPartitionsRequ
case 4: // PARTITIONS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list1024 = iprot.readListBegin();
- struct.partitions = new ArrayList(_list1024.size);
- Partition _elem1025;
- for (int _i1026 = 0; _i1026 < _list1024.size; ++_i1026)
+ org.apache.thrift.protocol.TList _list1032 = iprot.readListBegin();
+ struct.partitions = new ArrayList(_list1032.size);
+ Partition _elem1033;
+ for (int _i1034 = 0; _i1034 < _list1032.size; ++_i1034)
{
- _elem1025 = new Partition();
- _elem1025.read(iprot);
- struct.partitions.add(_elem1025);
+ _elem1033 = new Partition();
+ _elem1033.read(iprot);
+ struct.partitions.add(_elem1033);
}
iprot.readListEnd();
}
@@ -952,9 +952,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AlterPartitionsReq
oprot.writeFieldBegin(PARTITIONS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size()));
- for (Partition _iter1027 : struct.partitions)
+ for (Partition _iter1035 : struct.partitions)
{
- _iter1027.write(oprot);
+ _iter1035.write(oprot);
}
oprot.writeListEnd();
}
@@ -1000,9 +1000,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsRequ
oprot.writeString(struct.tableName);
{
oprot.writeI32(struct.partitions.size());
- for (Partition _iter1028 : struct.partitions)
+ for (Partition _iter1036 : struct.partitions)
{
- _iter1028.write(oprot);
+ _iter1036.write(oprot);
}
}
BitSet optionals = new BitSet();
@@ -1041,14 +1041,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsReque
struct.tableName = iprot.readString();
struct.setTableNameIsSet(true);
{
- org.apache.thrift.protocol.TList _list1029 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
- struct.partitions = new ArrayList(_list1029.size);
- Partition _elem1030;
- for (int _i1031 = 0; _i1031 < _list1029.size; ++_i1031)
+ org.apache.thrift.protocol.TList _list1037 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+ struct.partitions = new ArrayList(_list1037.size);
+ Partition _elem1038;
+ for (int _i1039 = 0; _i1039 < _list1037.size; ++_i1039)
{
- _elem1030 = new Partition();
- _elem1030.read(iprot);
- struct.partitions.add(_elem1030);
+ _elem1038 = new Partition();
+ _elem1038.read(iprot);
+ struct.partitions.add(_elem1038);
}
}
struct.setPartitionsIsSet(true);
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java
index ca7628866a..31eccab231 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java
@@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClearFileMetadataRe
case 1: // FILE_IDS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list872 = iprot.readListBegin();
- struct.fileIds = new ArrayList(_list872.size);
- long _elem873;
- for (int _i874 = 0; _i874 < _list872.size; ++_i874)
+ org.apache.thrift.protocol.TList _list880 = iprot.readListBegin();
+ struct.fileIds = new ArrayList(_list880.size);
+ long _elem881;
+ for (int _i882 = 0; _i882 < _list880.size; ++_i882)
{
- _elem873 = iprot.readI64();
- struct.fileIds.add(_elem873);
+ _elem881 = iprot.readI64();
+ struct.fileIds.add(_elem881);
}
iprot.readListEnd();
}
@@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClearFileMetadataR
oprot.writeFieldBegin(FILE_IDS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size()));
- for (long _iter875 : struct.fileIds)
+ for (long _iter883 : struct.fileIds)
{
- oprot.writeI64(_iter875);
+ oprot.writeI64(_iter883);
}
oprot.writeListEnd();
}
@@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe
TTupleProtocol oprot = (TTupleProtocol) prot;
{
oprot.writeI32(struct.fileIds.size());
- for (long _iter876 : struct.fileIds)
+ for (long _iter884 : struct.fileIds)
{
- oprot.writeI64(_iter876);
+ oprot.writeI64(_iter884);
}
}
}
@@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe
public void read(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRequest struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
{
- org.apache.thrift.protocol.TList _list877 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
- struct.fileIds = new ArrayList(_list877.size);
- long _elem878;
- for (int _i879 = 0; _i879 < _list877.size; ++_i879)
+ org.apache.thrift.protocol.TList _list885 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+ struct.fileIds = new ArrayList(_list885.size);
+ long _elem886;
+ for (int _i887 = 0; _i887 < _list885.size; ++_i887)
{
- _elem878 = iprot.readI64();
- struct.fileIds.add(_elem878);
+ _elem886 = iprot.readI64();
+ struct.fileIds.add(_elem886);
}
}
struct.setFileIdsIsSet(true);
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java
index c3cb11e14c..cce7cc08ba 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java
@@ -354,13 +354,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClientCapabilities
case 1: // VALUES
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list888 = iprot.readListBegin();
- struct.values = new ArrayList(_list888.size);
- ClientCapability _elem889;
- for (int _i890 = 0; _i890 < _list888.size; ++_i890)
+ org.apache.thrift.protocol.TList _list896 = iprot.readListBegin();
+ struct.values = new ArrayList(_list896.size);
+ ClientCapability _elem897;
+ for (int _i898 = 0; _i898 < _list896.size; ++_i898)
{
- _elem889 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32());
- struct.values.add(_elem889);
+ _elem897 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32());
+ struct.values.add(_elem897);
}
iprot.readListEnd();
}
@@ -386,9 +386,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClientCapabilities
oprot.writeFieldBegin(VALUES_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.values.size()));
- for (ClientCapability _iter891 : struct.values)
+ for (ClientCapability _iter899 : struct.values)
{
- oprot.writeI32(_iter891.getValue());
+ oprot.writeI32(_iter899.getValue());
}
oprot.writeListEnd();
}
@@ -413,9 +413,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities
TTupleProtocol oprot = (TTupleProtocol) prot;
{
oprot.writeI32(struct.values.size());
- for (ClientCapability _iter892 : struct.values)
+ for (ClientCapability _iter900 : struct.values)
{
- oprot.writeI32(_iter892.getValue());
+ oprot.writeI32(_iter900.getValue());
}
}
}
@@ -424,13 +424,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities
public void read(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
{
- org.apache.thrift.protocol.TList _list893 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32());
- struct.values = new ArrayList(_list893.size);
- ClientCapability _elem894;
- for (int _i895 = 0; _i895 < _list893.size; ++_i895)
+ org.apache.thrift.protocol.TList _list901 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32());
+ struct.values = new ArrayList(_list901.size);
+ ClientCapability _elem902;
+ for (int _i903 = 0; _i903 < _list901.size; ++_i903)
{
- _elem894 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32());
- struct.values.add(_elem894);
+ _elem902 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32());
+ struct.values.add(_elem902);
}
}
struct.setValuesIsSet(true);
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
index 5acd896fd3..147c91fb3f 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
@@ -814,15 +814,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CompactionRequest s
case 6: // PROPERTIES
if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
{
- org.apache.thrift.protocol.TMap _map738 = iprot.readMapBegin();
- struct.properties = new HashMap(2*_map738.size);
- String _key739;
- String _val740;
- for (int _i741 = 0; _i741 < _map738.size; ++_i741)
+ org.apache.thrift.protocol.TMap _map746 = iprot.readMapBegin();
+ struct.properties = new HashMap(2*_map746.size);
+ String _key747;
+ String _val748;
+ for (int _i749 = 0; _i749 < _map746.size; ++_i749)
{
- _key739 = iprot.readString();
- _val740 = iprot.readString();
- struct.properties.put(_key739, _val740);
+ _key747 = iprot.readString();
+ _val748 = iprot.readString();
+ struct.properties.put(_key747, _val748);
}
iprot.readMapEnd();
}
@@ -878,10 +878,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CompactionRequest
oprot.writeFieldBegin(PROPERTIES_FIELD_DESC);
{
oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size()));
- for (Map.Entry _iter742 : struct.properties.entrySet())
+ for (Map.Entry _iter750 : struct.properties.entrySet())
{
- oprot.writeString(_iter742.getKey());
- oprot.writeString(_iter742.getValue());
+ oprot.writeString(_iter750.getKey());
+ oprot.writeString(_iter750.getValue());
}
oprot.writeMapEnd();
}
@@ -928,10 +928,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionRequest s
if (struct.isSetProperties()) {
{
oprot.writeI32(struct.properties.size());
- for (Map.Entry _iter743 : struct.properties.entrySet())
+ for (Map.Entry _iter751 : struct.properties.entrySet())
{
- oprot.writeString(_iter743.getKey());
- oprot.writeString(_iter743.getValue());
+ oprot.writeString(_iter751.getKey());
+ oprot.writeString(_iter751.getValue());
}
}
}
@@ -957,15 +957,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionRequest st
}
if (incoming.get(2)) {
{
- org.apache.thrift.protocol.TMap _map744 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
- struct.properties = new HashMap(2*_map744.size);
- String _key745;
- String _val746;
- for (int _i747 = 0; _i747 < _map744.size; ++_i747)
+ org.apache.thrift.protocol.TMap _map752 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+ struct.properties = new HashMap(2*_map752.size);
+ String _key753;
+ String _val754;
+ for (int _i755 = 0; _i755 < _map752.size; ++_i755)
{
- _key745 = iprot.readString();
- _val746 = iprot.readString();
- struct.properties.put(_key745, _val746);
+ _key753 = iprot.readString();
+ _val754 = iprot.readString();
+ struct.properties.put(_key753, _val754);
}
}
struct.setPropertiesIsSet(true);
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreateTableRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreateTableRequest.java
new file mode 100644
index 0000000000..c2325c1c43
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreateTableRequest.java
@@ -0,0 +1,1730 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CreateTableRequest implements org.apache.thrift.TBase<CreateTableRequest, CreateTableRequest._Fields>, java.io.Serializable, Cloneable, Comparable<CreateTableRequest> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CreateTableRequest");
+
+ private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ private static final org.apache.thrift.protocol.TField ENV_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("envContext", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+ private static final org.apache.thrift.protocol.TField PRIMARY_KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("primaryKeys", org.apache.thrift.protocol.TType.LIST, (short)3);
+ private static final org.apache.thrift.protocol.TField FOREIGN_KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("foreignKeys", org.apache.thrift.protocol.TType.LIST, (short)4);
+ private static final org.apache.thrift.protocol.TField UNIQUE_CONSTRAINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("uniqueConstraints", org.apache.thrift.protocol.TType.LIST, (short)5);
+ private static final org.apache.thrift.protocol.TField NOT_NULL_CONSTRAINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("notNullConstraints", org.apache.thrift.protocol.TType.LIST, (short)6);
+ private static final org.apache.thrift.protocol.TField DEFAULT_CONSTRAINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("defaultConstraints", org.apache.thrift.protocol.TType.LIST, (short)7);
+ private static final org.apache.thrift.protocol.TField CHECK_CONSTRAINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("checkConstraints", org.apache.thrift.protocol.TType.LIST, (short)8);
+ private static final org.apache.thrift.protocol.TField PROCESSOR_CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("processorCapabilities", org.apache.thrift.protocol.TType.LIST, (short)9);
+ private static final org.apache.thrift.protocol.TField PROCESSOR_IDENTIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("processorIdentifier", org.apache.thrift.protocol.TType.STRING, (short)10);
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new CreateTableRequestStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new CreateTableRequestTupleSchemeFactory());
+ }
+
+ private Table table; // required
+ private EnvironmentContext envContext; // optional
+ private List<SQLPrimaryKey> primaryKeys; // optional
+ private List<SQLForeignKey> foreignKeys; // optional
+ private List<SQLUniqueConstraint> uniqueConstraints; // optional
+ private List<SQLNotNullConstraint> notNullConstraints; // optional
+ private List<SQLDefaultConstraint> defaultConstraints; // optional
+ private List<SQLCheckConstraint> checkConstraints; // optional
+ private List<String> processorCapabilities; // optional
+ private String processorIdentifier; // optional
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ TABLE((short)1, "table"),
+ ENV_CONTEXT((short)2, "envContext"),
+ PRIMARY_KEYS((short)3, "primaryKeys"),
+ FOREIGN_KEYS((short)4, "foreignKeys"),
+ UNIQUE_CONSTRAINTS((short)5, "uniqueConstraints"),
+ NOT_NULL_CONSTRAINTS((short)6, "notNullConstraints"),
+ DEFAULT_CONSTRAINTS((short)7, "defaultConstraints"),
+ CHECK_CONSTRAINTS((short)8, "checkConstraints"),
+ PROCESSOR_CAPABILITIES((short)9, "processorCapabilities"),
+ PROCESSOR_IDENTIFIER((short)10, "processorIdentifier");
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 1: // TABLE
+ return TABLE;
+ case 2: // ENV_CONTEXT
+ return ENV_CONTEXT;
+ case 3: // PRIMARY_KEYS
+ return PRIMARY_KEYS;
+ case 4: // FOREIGN_KEYS
+ return FOREIGN_KEYS;
+ case 5: // UNIQUE_CONSTRAINTS
+ return UNIQUE_CONSTRAINTS;
+ case 6: // NOT_NULL_CONSTRAINTS
+ return NOT_NULL_CONSTRAINTS;
+ case 7: // DEFAULT_CONSTRAINTS
+ return DEFAULT_CONSTRAINTS;
+ case 8: // CHECK_CONSTRAINTS
+ return CHECK_CONSTRAINTS;
+ case 9: // PROCESSOR_CAPABILITIES
+ return PROCESSOR_CAPABILITIES;
+ case 10: // PROCESSOR_IDENTIFIER
+ return PROCESSOR_IDENTIFIER;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+ private static final _Fields optionals[] = {_Fields.ENV_CONTEXT,_Fields.PRIMARY_KEYS,_Fields.FOREIGN_KEYS,_Fields.UNIQUE_CONSTRAINTS,_Fields.NOT_NULL_CONSTRAINTS,_Fields.DEFAULT_CONSTRAINTS,_Fields.CHECK_CONSTRAINTS,_Fields.PROCESSOR_CAPABILITIES,_Fields.PROCESSOR_IDENTIFIER};
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class)));
+ tmpMap.put(_Fields.ENV_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("envContext", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class)));
+ tmpMap.put(_Fields.PRIMARY_KEYS, new org.apache.thrift.meta_data.FieldMetaData("primaryKeys", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLPrimaryKey.class))));
+ tmpMap.put(_Fields.FOREIGN_KEYS, new org.apache.thrift.meta_data.FieldMetaData("foreignKeys", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLForeignKey.class))));
+ tmpMap.put(_Fields.UNIQUE_CONSTRAINTS, new org.apache.thrift.meta_data.FieldMetaData("uniqueConstraints", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLUniqueConstraint.class))));
+ tmpMap.put(_Fields.NOT_NULL_CONSTRAINTS, new org.apache.thrift.meta_data.FieldMetaData("notNullConstraints", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLNotNullConstraint.class))));
+ tmpMap.put(_Fields.DEFAULT_CONSTRAINTS, new org.apache.thrift.meta_data.FieldMetaData("defaultConstraints", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLDefaultConstraint.class))));
+ tmpMap.put(_Fields.CHECK_CONSTRAINTS, new org.apache.thrift.meta_data.FieldMetaData("checkConstraints", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLCheckConstraint.class))));
+ tmpMap.put(_Fields.PROCESSOR_CAPABILITIES, new org.apache.thrift.meta_data.FieldMetaData("processorCapabilities", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+ tmpMap.put(_Fields.PROCESSOR_IDENTIFIER, new org.apache.thrift.meta_data.FieldMetaData("processorIdentifier", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CreateTableRequest.class, metaDataMap);
+ }
+
+ public CreateTableRequest() {
+ }
+
+ public CreateTableRequest(
+ Table table)
+ {
+ this();
+ this.table = table;
+ }
+
+ /**
+ * Performs a deep copy on other.
+ */
+ public CreateTableRequest(CreateTableRequest other) {
+ if (other.isSetTable()) {
+ this.table = new Table(other.table);
+ }
+ if (other.isSetEnvContext()) {
+ this.envContext = new EnvironmentContext(other.envContext);
+ }
+ if (other.isSetPrimaryKeys()) {
+ List __this__primaryKeys = new ArrayList(other.primaryKeys.size());
+ for (SQLPrimaryKey other_element : other.primaryKeys) {
+ __this__primaryKeys.add(new SQLPrimaryKey(other_element));
+ }
+ this.primaryKeys = __this__primaryKeys;
+ }
+ if (other.isSetForeignKeys()) {
+ List __this__foreignKeys = new ArrayList(other.foreignKeys.size());
+ for (SQLForeignKey other_element : other.foreignKeys) {
+ __this__foreignKeys.add(new SQLForeignKey(other_element));
+ }
+ this.foreignKeys = __this__foreignKeys;
+ }
+ if (other.isSetUniqueConstraints()) {
+ List __this__uniqueConstraints = new ArrayList(other.uniqueConstraints.size());
+ for (SQLUniqueConstraint other_element : other.uniqueConstraints) {
+ __this__uniqueConstraints.add(new SQLUniqueConstraint(other_element));
+ }
+ this.uniqueConstraints = __this__uniqueConstraints;
+ }
+ if (other.isSetNotNullConstraints()) {
+ List __this__notNullConstraints = new ArrayList(other.notNullConstraints.size());
+ for (SQLNotNullConstraint other_element : other.notNullConstraints) {
+ __this__notNullConstraints.add(new SQLNotNullConstraint(other_element));
+ }
+ this.notNullConstraints = __this__notNullConstraints;
+ }
+ if (other.isSetDefaultConstraints()) {
+ List __this__defaultConstraints = new ArrayList(other.defaultConstraints.size());
+ for (SQLDefaultConstraint other_element : other.defaultConstraints) {
+ __this__defaultConstraints.add(new SQLDefaultConstraint(other_element));
+ }
+ this.defaultConstraints = __this__defaultConstraints;
+ }
+ if (other.isSetCheckConstraints()) {
+ List __this__checkConstraints = new ArrayList(other.checkConstraints.size());
+ for (SQLCheckConstraint other_element : other.checkConstraints) {
+ __this__checkConstraints.add(new SQLCheckConstraint(other_element));
+ }
+ this.checkConstraints = __this__checkConstraints;
+ }
+ if (other.isSetProcessorCapabilities()) {
+ List __this__processorCapabilities = new ArrayList(other.processorCapabilities);
+ this.processorCapabilities = __this__processorCapabilities;
+ }
+ if (other.isSetProcessorIdentifier()) {
+ this.processorIdentifier = other.processorIdentifier;
+ }
+ }
+
+ public CreateTableRequest deepCopy() {
+ return new CreateTableRequest(this);
+ }
+
+ @Override
+ public void clear() {
+ this.table = null;
+ this.envContext = null;
+ this.primaryKeys = null;
+ this.foreignKeys = null;
+ this.uniqueConstraints = null;
+ this.notNullConstraints = null;
+ this.defaultConstraints = null;
+ this.checkConstraints = null;
+ this.processorCapabilities = null;
+ this.processorIdentifier = null;
+ }
+
+ public Table getTable() {
+ return this.table;
+ }
+
+ public void setTable(Table table) {
+ this.table = table;
+ }
+
+ public void unsetTable() {
+ this.table = null;
+ }
+
+ /** Returns true if field table is set (has been assigned a value) and false otherwise */
+ public boolean isSetTable() {
+ return this.table != null;
+ }
+
+ public void setTableIsSet(boolean value) {
+ if (!value) {
+ this.table = null;
+ }
+ }
+
+ public EnvironmentContext getEnvContext() {
+ return this.envContext;
+ }
+
+ public void setEnvContext(EnvironmentContext envContext) {
+ this.envContext = envContext;
+ }
+
+ public void unsetEnvContext() {
+ this.envContext = null;
+ }
+
+ /** Returns true if field envContext is set (has been assigned a value) and false otherwise */
+ public boolean isSetEnvContext() {
+ return this.envContext != null;
+ }
+
+ public void setEnvContextIsSet(boolean value) {
+ if (!value) {
+ this.envContext = null;
+ }
+ }
+
+ public int getPrimaryKeysSize() {
+ return (this.primaryKeys == null) ? 0 : this.primaryKeys.size();
+ }
+
+ public java.util.Iterator getPrimaryKeysIterator() {
+ return (this.primaryKeys == null) ? null : this.primaryKeys.iterator();
+ }
+
+ public void addToPrimaryKeys(SQLPrimaryKey elem) {
+ if (this.primaryKeys == null) {
+ this.primaryKeys = new ArrayList();
+ }
+ this.primaryKeys.add(elem);
+ }
+
+ public List getPrimaryKeys() {
+ return this.primaryKeys;
+ }
+
+ public void setPrimaryKeys(List primaryKeys) {
+ this.primaryKeys = primaryKeys;
+ }
+
+ public void unsetPrimaryKeys() {
+ this.primaryKeys = null;
+ }
+
+ /** Returns true if field primaryKeys is set (has been assigned a value) and false otherwise */
+ public boolean isSetPrimaryKeys() {
+ return this.primaryKeys != null;
+ }
+
+ public void setPrimaryKeysIsSet(boolean value) {
+ if (!value) {
+ this.primaryKeys = null;
+ }
+ }
+
+ public int getForeignKeysSize() {
+ return (this.foreignKeys == null) ? 0 : this.foreignKeys.size();
+ }
+
+ public java.util.Iterator getForeignKeysIterator() {
+ return (this.foreignKeys == null) ? null : this.foreignKeys.iterator();
+ }
+
+ public void addToForeignKeys(SQLForeignKey elem) {
+ if (this.foreignKeys == null) {
+ this.foreignKeys = new ArrayList();
+ }
+ this.foreignKeys.add(elem);
+ }
+
+ public List getForeignKeys() {
+ return this.foreignKeys;
+ }
+
+ public void setForeignKeys(List foreignKeys) {
+ this.foreignKeys = foreignKeys;
+ }
+
+ public void unsetForeignKeys() {
+ this.foreignKeys = null;
+ }
+
+ /** Returns true if field foreignKeys is set (has been assigned a value) and false otherwise */
+ public boolean isSetForeignKeys() {
+ return this.foreignKeys != null;
+ }
+
+ public void setForeignKeysIsSet(boolean value) {
+ if (!value) {
+ this.foreignKeys = null;
+ }
+ }
+
+ public int getUniqueConstraintsSize() {
+ return (this.uniqueConstraints == null) ? 0 : this.uniqueConstraints.size();
+ }
+
+ public java.util.Iterator getUniqueConstraintsIterator() {
+ return (this.uniqueConstraints == null) ? null : this.uniqueConstraints.iterator();
+ }
+
+ public void addToUniqueConstraints(SQLUniqueConstraint elem) {
+ if (this.uniqueConstraints == null) {
+ this.uniqueConstraints = new ArrayList();
+ }
+ this.uniqueConstraints.add(elem);
+ }
+
+ public List getUniqueConstraints() {
+ return this.uniqueConstraints;
+ }
+
+ public void setUniqueConstraints(List uniqueConstraints) {
+ this.uniqueConstraints = uniqueConstraints;
+ }
+
+ public void unsetUniqueConstraints() {
+ this.uniqueConstraints = null;
+ }
+
+ /** Returns true if field uniqueConstraints is set (has been assigned a value) and false otherwise */
+ public boolean isSetUniqueConstraints() {
+ return this.uniqueConstraints != null;
+ }
+
+ public void setUniqueConstraintsIsSet(boolean value) {
+ if (!value) {
+ this.uniqueConstraints = null;
+ }
+ }
+
+ public int getNotNullConstraintsSize() {
+ return (this.notNullConstraints == null) ? 0 : this.notNullConstraints.size();
+ }
+
+ public java.util.Iterator getNotNullConstraintsIterator() {
+ return (this.notNullConstraints == null) ? null : this.notNullConstraints.iterator();
+ }
+
+ public void addToNotNullConstraints(SQLNotNullConstraint elem) {
+ if (this.notNullConstraints == null) {
+ this.notNullConstraints = new ArrayList();
+ }
+ this.notNullConstraints.add(elem);
+ }
+
+ public List getNotNullConstraints() {
+ return this.notNullConstraints;
+ }
+
+ public void setNotNullConstraints(List notNullConstraints) {
+ this.notNullConstraints = notNullConstraints;
+ }
+
+ public void unsetNotNullConstraints() {
+ this.notNullConstraints = null;
+ }
+
+ /** Returns true if field notNullConstraints is set (has been assigned a value) and false otherwise */
+ public boolean isSetNotNullConstraints() {
+ return this.notNullConstraints != null;
+ }
+
+ public void setNotNullConstraintsIsSet(boolean value) {
+ if (!value) {
+ this.notNullConstraints = null;
+ }
+ }
+
+ public int getDefaultConstraintsSize() {
+ return (this.defaultConstraints == null) ? 0 : this.defaultConstraints.size();
+ }
+
+ public java.util.Iterator getDefaultConstraintsIterator() {
+ return (this.defaultConstraints == null) ? null : this.defaultConstraints.iterator();
+ }
+
+ public void addToDefaultConstraints(SQLDefaultConstraint elem) {
+ if (this.defaultConstraints == null) {
+ this.defaultConstraints = new ArrayList();
+ }
+ this.defaultConstraints.add(elem);
+ }
+
+ public List getDefaultConstraints() {
+ return this.defaultConstraints;
+ }
+
+ public void setDefaultConstraints(List defaultConstraints) {
+ this.defaultConstraints = defaultConstraints;
+ }
+
+ public void unsetDefaultConstraints() {
+ this.defaultConstraints = null;
+ }
+
+ /** Returns true if field defaultConstraints is set (has been assigned a value) and false otherwise */
+ public boolean isSetDefaultConstraints() {
+ return this.defaultConstraints != null;
+ }
+
+ public void setDefaultConstraintsIsSet(boolean value) {
+ if (!value) {
+ this.defaultConstraints = null;
+ }
+ }
+
+ public int getCheckConstraintsSize() {
+ return (this.checkConstraints == null) ? 0 : this.checkConstraints.size();
+ }
+
+ public java.util.Iterator getCheckConstraintsIterator() {
+ return (this.checkConstraints == null) ? null : this.checkConstraints.iterator();
+ }
+
+ public void addToCheckConstraints(SQLCheckConstraint elem) {
+ if (this.checkConstraints == null) {
+ this.checkConstraints = new ArrayList();
+ }
+ this.checkConstraints.add(elem);
+ }
+
+ public List getCheckConstraints() {
+ return this.checkConstraints;
+ }
+
+ public void setCheckConstraints(List checkConstraints) {
+ this.checkConstraints = checkConstraints;
+ }
+
+ public void unsetCheckConstraints() {
+ this.checkConstraints = null;
+ }
+
+ /** Returns true if field checkConstraints is set (has been assigned a value) and false otherwise */
+ public boolean isSetCheckConstraints() {
+ return this.checkConstraints != null;
+ }
+
+ public void setCheckConstraintsIsSet(boolean value) {
+ if (!value) {
+ this.checkConstraints = null;
+ }
+ }
+
+ public int getProcessorCapabilitiesSize() {
+ return (this.processorCapabilities == null) ? 0 : this.processorCapabilities.size();
+ }
+
+ public java.util.Iterator getProcessorCapabilitiesIterator() {
+ return (this.processorCapabilities == null) ? null : this.processorCapabilities.iterator();
+ }
+
+ public void addToProcessorCapabilities(String elem) {
+ if (this.processorCapabilities == null) {
+ this.processorCapabilities = new ArrayList();
+ }
+ this.processorCapabilities.add(elem);
+ }
+
+ public List getProcessorCapabilities() {
+ return this.processorCapabilities;
+ }
+
+ public void setProcessorCapabilities(List processorCapabilities) {
+ this.processorCapabilities = processorCapabilities;
+ }
+
+ public void unsetProcessorCapabilities() {
+ this.processorCapabilities = null;
+ }
+
+ /** Returns true if field processorCapabilities is set (has been assigned a value) and false otherwise */
+ public boolean isSetProcessorCapabilities() {
+ return this.processorCapabilities != null;
+ }
+
+ public void setProcessorCapabilitiesIsSet(boolean value) {
+ if (!value) {
+ this.processorCapabilities = null;
+ }
+ }
+
+ public String getProcessorIdentifier() {
+ return this.processorIdentifier;
+ }
+
+ public void setProcessorIdentifier(String processorIdentifier) {
+ this.processorIdentifier = processorIdentifier;
+ }
+
+ public void unsetProcessorIdentifier() {
+ this.processorIdentifier = null;
+ }
+
+ /** Returns true if field processorIdentifier is set (has been assigned a value) and false otherwise */
+ public boolean isSetProcessorIdentifier() {
+ return this.processorIdentifier != null;
+ }
+
+ public void setProcessorIdentifierIsSet(boolean value) {
+ if (!value) {
+ this.processorIdentifier = null;
+ }
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case TABLE:
+ if (value == null) {
+ unsetTable();
+ } else {
+ setTable((Table)value);
+ }
+ break;
+
+ case ENV_CONTEXT:
+ if (value == null) {
+ unsetEnvContext();
+ } else {
+ setEnvContext((EnvironmentContext)value);
+ }
+ break;
+
+ case PRIMARY_KEYS:
+ if (value == null) {
+ unsetPrimaryKeys();
+ } else {
+ setPrimaryKeys((List)value);
+ }
+ break;
+
+ case FOREIGN_KEYS:
+ if (value == null) {
+ unsetForeignKeys();
+ } else {
+ setForeignKeys((List)value);
+ }
+ break;
+
+ case UNIQUE_CONSTRAINTS:
+ if (value == null) {
+ unsetUniqueConstraints();
+ } else {
+ setUniqueConstraints((List)value);
+ }
+ break;
+
+ case NOT_NULL_CONSTRAINTS:
+ if (value == null) {
+ unsetNotNullConstraints();
+ } else {
+ setNotNullConstraints((List)value);
+ }
+ break;
+
+ case DEFAULT_CONSTRAINTS:
+ if (value == null) {
+ unsetDefaultConstraints();
+ } else {
+ setDefaultConstraints((List)value);
+ }
+ break;
+
+ case CHECK_CONSTRAINTS:
+ if (value == null) {
+ unsetCheckConstraints();
+ } else {
+ setCheckConstraints((List)value);
+ }
+ break;
+
+ case PROCESSOR_CAPABILITIES:
+ if (value == null) {
+ unsetProcessorCapabilities();
+ } else {
+ setProcessorCapabilities((List)value);
+ }
+ break;
+
+ case PROCESSOR_IDENTIFIER:
+ if (value == null) {
+ unsetProcessorIdentifier();
+ } else {
+ setProcessorIdentifier((String)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case TABLE:
+ return getTable();
+
+ case ENV_CONTEXT:
+ return getEnvContext();
+
+ case PRIMARY_KEYS:
+ return getPrimaryKeys();
+
+ case FOREIGN_KEYS:
+ return getForeignKeys();
+
+ case UNIQUE_CONSTRAINTS:
+ return getUniqueConstraints();
+
+ case NOT_NULL_CONSTRAINTS:
+ return getNotNullConstraints();
+
+ case DEFAULT_CONSTRAINTS:
+ return getDefaultConstraints();
+
+ case CHECK_CONSTRAINTS:
+ return getCheckConstraints();
+
+ case PROCESSOR_CAPABILITIES:
+ return getProcessorCapabilities();
+
+ case PROCESSOR_IDENTIFIER:
+ return getProcessorIdentifier();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case TABLE:
+ return isSetTable();
+ case ENV_CONTEXT:
+ return isSetEnvContext();
+ case PRIMARY_KEYS:
+ return isSetPrimaryKeys();
+ case FOREIGN_KEYS:
+ return isSetForeignKeys();
+ case UNIQUE_CONSTRAINTS:
+ return isSetUniqueConstraints();
+ case NOT_NULL_CONSTRAINTS:
+ return isSetNotNullConstraints();
+ case DEFAULT_CONSTRAINTS:
+ return isSetDefaultConstraints();
+ case CHECK_CONSTRAINTS:
+ return isSetCheckConstraints();
+ case PROCESSOR_CAPABILITIES:
+ return isSetProcessorCapabilities();
+ case PROCESSOR_IDENTIFIER:
+ return isSetProcessorIdentifier();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof CreateTableRequest)
+ return this.equals((CreateTableRequest)that);
+ return false;
+ }
+
+ public boolean equals(CreateTableRequest that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_table = true && this.isSetTable();
+ boolean that_present_table = true && that.isSetTable();
+ if (this_present_table || that_present_table) {
+ if (!(this_present_table && that_present_table))
+ return false;
+ if (!this.table.equals(that.table))
+ return false;
+ }
+
+ boolean this_present_envContext = true && this.isSetEnvContext();
+ boolean that_present_envContext = true && that.isSetEnvContext();
+ if (this_present_envContext || that_present_envContext) {
+ if (!(this_present_envContext && that_present_envContext))
+ return false;
+ if (!this.envContext.equals(that.envContext))
+ return false;
+ }
+
+ boolean this_present_primaryKeys = true && this.isSetPrimaryKeys();
+ boolean that_present_primaryKeys = true && that.isSetPrimaryKeys();
+ if (this_present_primaryKeys || that_present_primaryKeys) {
+ if (!(this_present_primaryKeys && that_present_primaryKeys))
+ return false;
+ if (!this.primaryKeys.equals(that.primaryKeys))
+ return false;
+ }
+
+ boolean this_present_foreignKeys = true && this.isSetForeignKeys();
+ boolean that_present_foreignKeys = true && that.isSetForeignKeys();
+ if (this_present_foreignKeys || that_present_foreignKeys) {
+ if (!(this_present_foreignKeys && that_present_foreignKeys))
+ return false;
+ if (!this.foreignKeys.equals(that.foreignKeys))
+ return false;
+ }
+
+ boolean this_present_uniqueConstraints = true && this.isSetUniqueConstraints();
+ boolean that_present_uniqueConstraints = true && that.isSetUniqueConstraints();
+ if (this_present_uniqueConstraints || that_present_uniqueConstraints) {
+ if (!(this_present_uniqueConstraints && that_present_uniqueConstraints))
+ return false;
+ if (!this.uniqueConstraints.equals(that.uniqueConstraints))
+ return false;
+ }
+
+ boolean this_present_notNullConstraints = true && this.isSetNotNullConstraints();
+ boolean that_present_notNullConstraints = true && that.isSetNotNullConstraints();
+ if (this_present_notNullConstraints || that_present_notNullConstraints) {
+ if (!(this_present_notNullConstraints && that_present_notNullConstraints))
+ return false;
+ if (!this.notNullConstraints.equals(that.notNullConstraints))
+ return false;
+ }
+
+ boolean this_present_defaultConstraints = true && this.isSetDefaultConstraints();
+ boolean that_present_defaultConstraints = true && that.isSetDefaultConstraints();
+ if (this_present_defaultConstraints || that_present_defaultConstraints) {
+ if (!(this_present_defaultConstraints && that_present_defaultConstraints))
+ return false;
+ if (!this.defaultConstraints.equals(that.defaultConstraints))
+ return false;
+ }
+
+ boolean this_present_checkConstraints = true && this.isSetCheckConstraints();
+ boolean that_present_checkConstraints = true && that.isSetCheckConstraints();
+ if (this_present_checkConstraints || that_present_checkConstraints) {
+ if (!(this_present_checkConstraints && that_present_checkConstraints))
+ return false;
+ if (!this.checkConstraints.equals(that.checkConstraints))
+ return false;
+ }
+
+ boolean this_present_processorCapabilities = true && this.isSetProcessorCapabilities();
+ boolean that_present_processorCapabilities = true && that.isSetProcessorCapabilities();
+ if (this_present_processorCapabilities || that_present_processorCapabilities) {
+ if (!(this_present_processorCapabilities && that_present_processorCapabilities))
+ return false;
+ if (!this.processorCapabilities.equals(that.processorCapabilities))
+ return false;
+ }
+
+ boolean this_present_processorIdentifier = true && this.isSetProcessorIdentifier();
+ boolean that_present_processorIdentifier = true && that.isSetProcessorIdentifier();
+ if (this_present_processorIdentifier || that_present_processorIdentifier) {
+ if (!(this_present_processorIdentifier && that_present_processorIdentifier))
+ return false;
+ if (!this.processorIdentifier.equals(that.processorIdentifier))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List